2024-11-20 19:24:57,366 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 19:24:57,381 main DEBUG Took 0.013138 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-20 19:24:57,382 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-20 19:24:57,382 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-20 19:24:57,383 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-20 19:24:57,385 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,394 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-20 19:24:57,408 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,410 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,410 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,411 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,412 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,412 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,414 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,414 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,415 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,415 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,416 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,417 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,417 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,418 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-20 19:24:57,419 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,419 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,420 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,420 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,421 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,421 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,422 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,422 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,423 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,423 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-20 19:24:57,424 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,425 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-20 19:24:57,427 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-20 19:24:57,428 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-20 19:24:57,431 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-20 19:24:57,432 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-20 19:24:57,434 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-20 19:24:57,435 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-20 19:24:57,445 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-20 19:24:57,447 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-20 19:24:57,449 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-20 19:24:57,450 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-20 19:24:57,451 main DEBUG createAppenders(={Console}) 2024-11-20 19:24:57,452 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-20 19:24:57,452 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-20 19:24:57,452 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-20 19:24:57,453 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-20 19:24:57,454 main DEBUG OutputStream closed 2024-11-20 19:24:57,454 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-20 19:24:57,455 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-20 19:24:57,455 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-20 19:24:57,551 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-20 19:24:57,554 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-20 19:24:57,555 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-20 19:24:57,557 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-20 19:24:57,558 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-20 19:24:57,559 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-20 19:24:57,559 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-20 19:24:57,560 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-20 19:24:57,560 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-20 19:24:57,561 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-20 19:24:57,562 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-20 19:24:57,562 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-20 19:24:57,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-20 19:24:57,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-20 19:24:57,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-20 19:24:57,564 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-20 19:24:57,564 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-20 19:24:57,565 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-20 19:24:57,567 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-20 19:24:57,568 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-20 19:24:57,568 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-20 19:24:57,569 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-20T19:24:57,877 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249 2024-11-20 19:24:57,880 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-20 19:24:57,880 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-20T19:24:57,891 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-11-20T19:24:57,922 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-20T19:24:57,926 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870, deleteOnExit=true 2024-11-20T19:24:57,927 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-20T19:24:57,928 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/test.cache.data in system properties and HBase conf 2024-11-20T19:24:57,929 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/hadoop.tmp.dir in system properties and HBase conf 2024-11-20T19:24:57,930 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/hadoop.log.dir in system properties and HBase conf 2024-11-20T19:24:57,931 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-20T19:24:57,932 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-20T19:24:57,932 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-20T19:24:58,044 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-20T19:24:58,161 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-20T19:24:58,167 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-20T19:24:58,168 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-20T19:24:58,169 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-20T19:24:58,169 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T19:24:58,170 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-20T19:24:58,171 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-20T19:24:58,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-20T19:24:58,172 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T19:24:58,173 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-20T19:24:58,173 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/nfs.dump.dir in system properties and HBase conf 2024-11-20T19:24:58,174 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/java.io.tmpdir in system properties and HBase conf 2024-11-20T19:24:58,175 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-20T19:24:58,175 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-20T19:24:58,176 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-20T19:24:59,358 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-20T19:24:59,425 INFO [Time-limited test {}] log.Log(170): Logging initialized @2871ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-20T19:24:59,487 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T19:24:59,547 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T19:24:59,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T19:24:59,570 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T19:24:59,572 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T19:24:59,584 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T19:24:59,587 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/hadoop.log.dir/,AVAILABLE} 2024-11-20T19:24:59,588 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T19:24:59,782 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/java.io.tmpdir/jetty-localhost-35743-hadoop-hdfs-3_4_1-tests_jar-_-any-3682802669332229177/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T19:24:59,788 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:35743} 2024-11-20T19:24:59,789 INFO [Time-limited test {}] server.Server(415): Started @3236ms 2024-11-20T19:25:00,311 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-20T19:25:00,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-20T19:25:00,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-20T19:25:00,321 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-20T19:25:00,321 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-20T19:25:00,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/hadoop.log.dir/,AVAILABLE} 2024-11-20T19:25:00,322 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-20T19:25:00,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/java.io.tmpdir/jetty-localhost-41475-hadoop-hdfs-3_4_1-tests_jar-_-any-16816525961558164846/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T19:25:00,421 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:41475} 2024-11-20T19:25:00,421 INFO [Time-limited test {}] server.Server(415): Started @3868ms 2024-11-20T19:25:00,469 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-20T19:25:01,399 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/dfs/data/data1/current/BP-241231531-172.17.0.2-1732130698820/current, will proceed with Du for space computation calculation, 2024-11-20T19:25:01,399 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/dfs/data/data2/current/BP-241231531-172.17.0.2-1732130698820/current, will proceed with Du for space computation calculation, 2024-11-20T19:25:01,432 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-20T19:25:01,489 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ff18ab5f9a4459b with lease ID 0x78c1154469ee3e64: Processing first storage report for DS-670dd707-aa81-4325-a709-18eec342bc77 from datanode DatanodeRegistration(127.0.0.1:46289, datanodeUuid=c3d568ab-dda1-4760-b2a8-9a923557015c, infoPort=33411, infoSecurePort=0, ipcPort=33825, storageInfo=lv=-57;cid=testClusterID;nsid=710711882;c=1732130698820) 2024-11-20T19:25:01,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ff18ab5f9a4459b with lease ID 0x78c1154469ee3e64: from storage DS-670dd707-aa81-4325-a709-18eec342bc77 node DatanodeRegistration(127.0.0.1:46289, datanodeUuid=c3d568ab-dda1-4760-b2a8-9a923557015c, infoPort=33411, infoSecurePort=0, ipcPort=33825, storageInfo=lv=-57;cid=testClusterID;nsid=710711882;c=1732130698820), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-20T19:25:01,491 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8ff18ab5f9a4459b with lease ID 0x78c1154469ee3e64: Processing first storage report for DS-ffb12f8c-dbb1-4691-8f78-dca8f47d6223 from datanode DatanodeRegistration(127.0.0.1:46289, datanodeUuid=c3d568ab-dda1-4760-b2a8-9a923557015c, infoPort=33411, infoSecurePort=0, ipcPort=33825, storageInfo=lv=-57;cid=testClusterID;nsid=710711882;c=1732130698820) 2024-11-20T19:25:01,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8ff18ab5f9a4459b with lease ID 0x78c1154469ee3e64: from storage DS-ffb12f8c-dbb1-4691-8f78-dca8f47d6223 node DatanodeRegistration(127.0.0.1:46289, datanodeUuid=c3d568ab-dda1-4760-b2a8-9a923557015c, infoPort=33411, infoSecurePort=0, ipcPort=33825, storageInfo=lv=-57;cid=testClusterID;nsid=710711882;c=1732130698820), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-20T19:25:01,578 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249 
2024-11-20T19:25:01,669 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/zookeeper_0, clientPort=50476, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-20T19:25:01,682 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=50476 2024-11-20T19:25:01,692 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:01,695 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:01,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741825_1001 (size=7) 2024-11-20T19:25:02,373 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d with version=8 2024-11-20T19:25:02,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/hbase-staging 2024-11-20T19:25:02,501 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-20T19:25:02,748 INFO [Time-limited test {}] client.ConnectionUtils(129): master/db9c3a6c6492:0 server-side Connection retries=45 2024-11-20T19:25:02,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T19:25:02,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T19:25:02,764 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T19:25:02,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T19:25:02,764 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T19:25:02,918 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T19:25:02,983 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-20T19:25:02,992 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-20T19:25:02,995 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T19:25:03,017 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 52562 (auto-detected) 2024-11-20T19:25:03,018 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-20T19:25:03,035 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36861 2024-11-20T19:25:03,042 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:03,044 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:03,055 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:36861 connecting to ZooKeeper ensemble=127.0.0.1:50476 2024-11-20T19:25:03,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:368610x0, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T19:25:03,113 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36861-0x1015afea3c50000 connected 2024-11-20T19:25:03,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T19:25:03,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T19:25:03,197 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T19:25:03,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36861 2024-11-20T19:25:03,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36861 2024-11-20T19:25:03,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36861 2024-11-20T19:25:03,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36861 2024-11-20T19:25:03,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36861 
2024-11-20T19:25:03,210 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d, hbase.cluster.distributed=false 2024-11-20T19:25:03,277 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/db9c3a6c6492:0 server-side Connection retries=45 2024-11-20T19:25:03,277 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T19:25:03,278 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-20T19:25:03,278 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-20T19:25:03,278 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-20T19:25:03,278 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-20T19:25:03,280 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-20T19:25:03,283 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-20T19:25:03,284 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35979 2024-11-20T19:25:03,286 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-20T19:25:03,290 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-20T19:25:03,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:03,294 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:03,298 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:35979 connecting to ZooKeeper ensemble=127.0.0.1:50476 2024-11-20T19:25:03,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359790x0, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-20T19:25:03,309 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:359790x0, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T19:25:03,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35979-0x1015afea3c50001 connected 2024-11-20T19:25:03,311 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T19:25:03,312 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-20T19:25:03,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35979 2024-11-20T19:25:03,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35979 2024-11-20T19:25:03,313 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35979 2024-11-20T19:25:03,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35979 2024-11-20T19:25:03,317 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35979 2024-11-20T19:25:03,319 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:03,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T19:25:03,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T19:25:03,333 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;db9c3a6c6492:36861 2024-11-20T19:25:03,333 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:03,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T19:25:03,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-20T19:25:03,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:03,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:03,366 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T19:25:03,366 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-20T19:25:03,369 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/db9c3a6c6492,36861,1732130702494 from backup master directory 2024-11-20T19:25:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T19:25:03,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-20T19:25:03,380 WARN [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-20T19:25:03,380 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:03,382 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-20T19:25:03,383 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-20T19:25:03,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741826_1002 (size=42) 2024-11-20T19:25:03,845 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/hbase.id with ID: 4e43d5ae-6977-4e86-9017-a65cd504e94c 2024-11-20T19:25:03,888 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-20T19:25:03,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:03,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:03,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741827_1003 (size=196) 2024-11-20T19:25:04,386 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:04,388 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-20T19:25:04,403 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:04,407 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T19:25:04,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741828_1004 (size=1189) 2024-11-20T19:25:04,859 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store 2024-11-20T19:25:04,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741829_1005 (size=34) 2024-11-20T19:25:05,284 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-20T19:25:05,285 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:05,286 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T19:25:05,286 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:25:05,286 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:25:05,286 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T19:25:05,286 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:25:05,286 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:25:05,286 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T19:25:05,288 WARN [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/.initializing 2024-11-20T19:25:05,288 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/WALs/db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:05,294 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T19:25:05,304 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9c3a6c6492%2C36861%2C1732130702494, suffix=, logDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/WALs/db9c3a6c6492,36861,1732130702494, archiveDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/oldWALs, maxLogs=10 2024-11-20T19:25:05,322 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/WALs/db9c3a6c6492,36861,1732130702494/db9c3a6c6492%2C36861%2C1732130702494.1732130705308, exclude list is [], retry=0 2024-11-20T19:25:05,337 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46289,DS-670dd707-aa81-4325-a709-18eec342bc77,DISK] 2024-11-20T19:25:05,339 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-20T19:25:05,370 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/WALs/db9c3a6c6492,36861,1732130702494/db9c3a6c6492%2C36861%2C1732130702494.1732130705308 2024-11-20T19:25:05,371 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-20T19:25:05,371 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:05,371 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:05,374 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,375 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,412 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,451 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-20T19:25:05,457 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:05,460 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:05,462 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,467 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-20T19:25:05,467 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:05,468 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:05,469 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-20T19:25:05,474 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:05,475 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:05,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-20T19:25:05,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:05,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:05,485 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,486 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,495 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-20T19:25:05,498 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-20T19:25:05,503 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:05,505 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67403026, jitterRate=0.004383355379104614}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-20T19:25:05,510 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T19:25:05,511 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-20T19:25:05,539 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54ac4d9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:05,567 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
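The HMaster(882) entry above notes that no hbase:meta location is published in ZooKeeper yet; later entries create and watch /hbase/meta-region-server. The following sketch checks that znode directly with the plain ZooKeeper client, using the quorum 127.0.0.1:50476 and base znode /hbase reported throughout this log; the remark about the payload format is an assumption.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaLocationCheck {
      public static void main(String[] args) throws Exception {
        // Quorum string taken from the surrounding log entries.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:50476", 30000, event -> { });
        Stat stat = zk.exists("/hbase/meta-region-server", false);
        if (stat == null) {
          // Matches the "No meta location available on zookeeper" state logged above.
          System.out.println("hbase:meta location not yet published");
        } else {
          byte[] data = zk.getData("/hbase/meta-region-server", false, stat);
          System.out.println("meta-region-server znode holds " + data.length
              + " bytes (typically a protobuf-encoded server name)");
        }
        zk.close();
      }
    }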
2024-11-20T19:25:05,577 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-20T19:25:05,577 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-20T19:25:05,579 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-20T19:25:05,580 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-20T19:25:05,584 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-20T19:25:05,585 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-20T19:25:05,607 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-20T19:25:05,619 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-20T19:25:05,629 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-20T19:25:05,631 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-20T19:25:05,632 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-20T19:25:05,641 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-20T19:25:05,643 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-20T19:25:05,648 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-20T19:25:05,658 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-20T19:25:05,659 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-20T19:25:05,671 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-20T19:25:05,689 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-20T19:25:05,700 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-20T19:25:05,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T19:25:05,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-20T19:25:05,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,713 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=db9c3a6c6492,36861,1732130702494, sessionid=0x1015afea3c50000, setting cluster-up flag (Was=false) 2024-11-20T19:25:05,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,767 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-20T19:25:05,770 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:05,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:05,820 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-20T19:25:05,822 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:05,913 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-20T19:25:05,919 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-20T19:25:05,922 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-20T19:25:05,928 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: db9c3a6c6492,36861,1732130702494 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-20T19:25:05,930 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:05,931 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:05,931 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:05,931 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=5, maxPoolSize=5 2024-11-20T19:25:05,931 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/db9c3a6c6492:0, corePoolSize=10, maxPoolSize=10 2024-11-20T19:25:05,931 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:05,931 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=2, maxPoolSize=2 2024-11-20T19:25:05,932 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:05,933 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732130735933 2024-11-20T19:25:05,934 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;db9c3a6c6492:35979 2024-11-20T19:25:05,935 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-20T19:25:05,935 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1008): ClusterId : 
4e43d5ae-6977-4e86-9017-a65cd504e94c 2024-11-20T19:25:05,936 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-20T19:25:05,937 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T19:25:05,938 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-20T19:25:05,938 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-20T19:25:05,939 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-20T19:25:05,939 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-20T19:25:05,940 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-20T19:25:05,940 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-20T19:25:05,940 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:05,942 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-20T19:25:05,942 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:05,942 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T19:25:05,943 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-20T19:25:05,943 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-20T19:25:05,945 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-20T19:25:05,946 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-20T19:25:05,947 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.large.0-1732130705947,5,FailOnTimeoutGroup] 2024-11-20T19:25:05,947 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.small.0-1732130705947,5,FailOnTimeoutGroup] 2024-11-20T19:25:05,948 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:05,948 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-20T19:25:05,949 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:05,949 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:05,952 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-20T19:25:05,952 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-20T19:25:05,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741831_1007 (size=1039) 2024-11-20T19:25:05,963 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-20T19:25:05,963 DEBUG [RS:0;db9c3a6c6492:35979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c78097d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:05,965 DEBUG [RS:0;db9c3a6c6492:35979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d812d41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9c3a6c6492/172.17.0.2:0 2024-11-20T19:25:05,968 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-20T19:25:05,968 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-20T19:25:05,969 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1090): About to register with Master. 
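The CleanerChore entries above list the delegates that make up the LogsCleaner and HFileCleaner chores (TimeToLiveLogCleaner, ReplicationLogCleaner, SnapshotHFileCleaner, HFileLinkCleaner, TimeToLiveHFileCleaner, and so on). As a sketch, those chains are normally assembled from comma-separated plugin lists in the configuration; the two key names below are the commonly used ones and should be treated as assumptions to verify against hbase-default.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerPlugins {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Delegates backing the LogsCleaner chore (old WAL cleanup).
        String[] logCleaners = conf.getStrings("hbase.master.logcleaner.plugins", new String[0]);
        // Delegates backing the HFileCleaner chore (archived HFile cleanup).
        String[] hfileCleaners = conf.getStrings("hbase.master.hfilecleaner.plugins", new String[0]);
        System.out.println("log cleaners:   " + String.join(", ", logCleaners));
        System.out.println("hfile cleaners: " + String.join(", ", hfileCleaners));
      }
    }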
2024-11-20T19:25:05,971 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(3073): reportForDuty to master=db9c3a6c6492,36861,1732130702494 with isa=db9c3a6c6492/172.17.0.2:35979, startcode=1732130703276 2024-11-20T19:25:05,985 DEBUG [RS:0;db9c3a6c6492:35979 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T19:25:06,019 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59925, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T19:25:06,027 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36861 {}] master.ServerManager(332): Checking decommissioned status of RegionServer db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,029 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36861 {}] master.ServerManager(486): Registering regionserver=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,042 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:25:06,042 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34097 2024-11-20T19:25:06,042 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-20T19:25:06,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T19:25:06,054 DEBUG [RS:0;db9c3a6c6492:35979 {}] zookeeper.ZKUtil(111): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,054 WARN [RS:0;db9c3a6c6492:35979 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
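The entries above show the regionserver reporting for duty, the master registering db9c3a6c6492,35979,1732130703276, and its ephemeral znode under /hbase/rs being picked up by RegionServerTracker. A hedged sketch of confirming the same membership from a client via ClusterMetrics follows; in this single-node test it would print exactly that one server name.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveRegionServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Servers the active master currently tracks as live.
          admin.getClusterMetrics().getLiveServerMetrics().keySet()
              .forEach(System.out::println);
        }
      }
    }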
2024-11-20T19:25:06,055 INFO [RS:0;db9c3a6c6492:35979 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T19:25:06,055 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,057 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [db9c3a6c6492,35979,1732130703276] 2024-11-20T19:25:06,069 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-20T19:25:06,080 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-20T19:25:06,097 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-20T19:25:06,100 INFO [RS:0;db9c3a6c6492:35979 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-20T19:25:06,100 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:06,101 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-20T19:25:06,107 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
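The PressureAwareCompactionThroughputController entry above reports an upper bound of 100 MB/s, a lower bound of 50 MB/s, and a 60000 ms tuning period. The snippet below sketches the corresponding configuration, expressed in bytes per second; the key names, and the tuning-period key in particular, are assumptions to verify against the running version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughput {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Bounds reported above, converted to bytes per second.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        // Tuning period (60000 ms in the log); key name assumed, check hbase-default.xml.
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60000);
      }
    }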
2024-11-20T19:25:06,107 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,108 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,108 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,108 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,108 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,108 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/db9c3a6c6492:0, corePoolSize=2, maxPoolSize=2 2024-11-20T19:25:06,109 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,109 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,109 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,109 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,109 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/db9c3a6c6492:0, corePoolSize=1, maxPoolSize=1 2024-11-20T19:25:06,109 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/db9c3a6c6492:0, corePoolSize=3, maxPoolSize=3 2024-11-20T19:25:06,110 DEBUG [RS:0;db9c3a6c6492:35979 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0, corePoolSize=3, maxPoolSize=3 2024-11-20T19:25:06,111 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:06,111 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:06,112 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:06,112 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:06,112 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,35979,1732130703276-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-20T19:25:06,129 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-20T19:25:06,132 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,35979,1732130703276-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:06,160 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.Replication(204): db9c3a6c6492,35979,1732130703276 started 2024-11-20T19:25:06,160 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1767): Serving as db9c3a6c6492,35979,1732130703276, RpcServer on db9c3a6c6492/172.17.0.2:35979, sessionid=0x1015afea3c50001 2024-11-20T19:25:06,161 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-20T19:25:06,161 DEBUG [RS:0;db9c3a6c6492:35979 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,161 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9c3a6c6492,35979,1732130703276' 2024-11-20T19:25:06,162 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-20T19:25:06,162 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-20T19:25:06,163 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-20T19:25:06,163 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-20T19:25:06,163 DEBUG [RS:0;db9c3a6c6492:35979 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,164 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'db9c3a6c6492,35979,1732130703276' 2024-11-20T19:25:06,164 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-20T19:25:06,164 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-20T19:25:06,165 DEBUG [RS:0;db9c3a6c6492:35979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-20T19:25:06,165 INFO [RS:0;db9c3a6c6492:35979 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-20T19:25:06,165 INFO [RS:0;db9c3a6c6492:35979 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
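Both quota managers above log that quota support is disabled, so neither RPC throttling nor space quotas are active in this test. They are switched on cluster-wide with a single boolean read at process start; the snippet below is a minimal sketch of setting it programmatically, whereas in practice it would live in hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Turns on the quota subsystem that RegionServerRpcQuotaManager and
        // RegionServerSpaceQuotaManager reported as disabled above.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("hbase.quota.enabled="
            + conf.getBoolean("hbase.quota.enabled", false));
      }
    }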
2024-11-20T19:25:06,273 INFO [RS:0;db9c3a6c6492:35979 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-20T19:25:06,276 INFO [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9c3a6c6492%2C35979%2C1732130703276, suffix=, logDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276, archiveDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/oldWALs, maxLogs=32 2024-11-20T19:25:06,293 DEBUG [RS:0;db9c3a6c6492:35979 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276/db9c3a6c6492%2C35979%2C1732130703276.1732130706279, exclude list is [], retry=0 2024-11-20T19:25:06,298 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46289,DS-670dd707-aa81-4325-a709-18eec342bc77,DISK] 2024-11-20T19:25:06,301 INFO [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276/db9c3a6c6492%2C35979%2C1732130703276.1732130706279 2024-11-20T19:25:06,302 DEBUG [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-20T19:25:06,357 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-20T19:25:06,358 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:25:06,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741833_1009 (size=32) 2024-11-20T19:25:06,775 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:06,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T19:25:06,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T19:25:06,785 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:06,786 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:06,787 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T19:25:06,790 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T19:25:06,790 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:06,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:06,791 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T19:25:06,794 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T19:25:06,794 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:06,795 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:06,796 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740 2024-11-20T19:25:06,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740 2024-11-20T19:25:06,800 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
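The FlushLargeStoresPolicy entries fall back to a per-family bound derived from the region's memstore flush size whenever hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor; the log shows 32.0 M for the four-family master:store region earlier and 16.0 M here for hbase:meta. A sketch of setting that table-level value explicitly follows; the table name and the 64 MB figure are illustrative only.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
      public static void main(String[] args) {
        // Hypothetical table; the key is the same one named in the log entries above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(64L * 1024 * 1024))
            .build();
        System.out.println(
            td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }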
2024-11-20T19:25:06,803 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T19:25:06,807 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:06,808 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68057756, jitterRate=0.01413959264755249}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:06,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T19:25:06,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T19:25:06,812 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T19:25:06,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T19:25:06,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T19:25:06,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T19:25:06,813 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T19:25:06,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T19:25:06,816 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-20T19:25:06,816 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-20T19:25:06,821 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-20T19:25:06,829 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-20T19:25:06,831 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-20T19:25:06,985 DEBUG [db9c3a6c6492:36861 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-20T19:25:06,992 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:06,998 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9c3a6c6492,35979,1732130703276, state=OPENING 2024-11-20T19:25:07,041 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-20T19:25:07,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:07,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:07,050 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:07,051 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:07,053 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:07,237 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:07,239 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-20T19:25:07,242 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-20T19:25:07,252 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-20T19:25:07,252 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-20T19:25:07,253 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-20T19:25:07,257 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=db9c3a6c6492%2C35979%2C1732130703276.meta, suffix=.meta, logDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276, archiveDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/oldWALs, maxLogs=32 2024-11-20T19:25:07,273 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276/db9c3a6c6492%2C35979%2C1732130703276.meta.1732130707259.meta, exclude list is [], retry=0 2024-11-20T19:25:07,277 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46289,DS-670dd707-aa81-4325-a709-18eec342bc77,DISK] 2024-11-20T19:25:07,280 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/WALs/db9c3a6c6492,35979,1732130703276/db9c3a6c6492%2C35979%2C1732130703276.meta.1732130707259.meta 2024-11-20T19:25:07,280 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:33411:33411)] 2024-11-20T19:25:07,281 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:07,282 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-20T19:25:07,330 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-20T19:25:07,334 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-20T19:25:07,339 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-20T19:25:07,339 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:07,339 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-20T19:25:07,339 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-20T19:25:07,342 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-20T19:25:07,344 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-20T19:25:07,344 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:07,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-20T19:25:07,347 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-20T19:25:07,347 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:07,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-20T19:25:07,349 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-20T19:25:07,349 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-20T19:25:07,351 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740 2024-11-20T19:25:07,354 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740 2024-11-20T19:25:07,357 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:07,359 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-20T19:25:07,361 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67107734, jitterRate=-1.683831214904785E-5}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:07,363 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-20T19:25:07,370 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732130707231 2024-11-20T19:25:07,380 DEBUG [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-20T19:25:07,381 INFO [RS_OPEN_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-20T19:25:07,382 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:07,384 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as db9c3a6c6492,35979,1732130703276, state=OPEN 2024-11-20T19:25:07,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T19:25:07,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-20T19:25:07,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:07,417 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-20T19:25:07,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-20T19:25:07,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=db9c3a6c6492,35979,1732130703276 in 364 msec 2024-11-20T19:25:07,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-20T19:25:07,433 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-11-20T19:25:07,442 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5740 sec 2024-11-20T19:25:07,442 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732130707442, completionTime=-1 2024-11-20T19:25:07,443 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-20T19:25:07,443 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-20T19:25:07,485 DEBUG [hconnection-0x571b1d6e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:07,488 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:07,501 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-20T19:25:07,501 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732130767501 2024-11-20T19:25:07,501 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732130827501 2024-11-20T19:25:07,501 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 57 msec 2024-11-20T19:25:07,535 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,36861,1732130702494-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:07,536 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,36861,1732130702494-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:07,536 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,36861,1732130702494-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:07,537 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-db9c3a6c6492:36861, period=300000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:07,538 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-20T19:25:07,544 DEBUG [master/db9c3a6c6492:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-20T19:25:07,546 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-20T19:25:07,547 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-20T19:25:07,552 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-20T19:25:07,555 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:25:07,556 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:07,558 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:25:07,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741835_1011 (size=358) 2024-11-20T19:25:07,978 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d21e0da06747a4b3da8e29803090bc10, NAME => 'hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:25:07,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741836_1012 (size=42) 2024-11-20T19:25:08,391 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:08,391 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing d21e0da06747a4b3da8e29803090bc10, disabling compactions & flushes 2024-11-20T19:25:08,392 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:25:08,392 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:25:08,392 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 
after waiting 0 ms 2024-11-20T19:25:08,392 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:25:08,392 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:25:08,392 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for d21e0da06747a4b3da8e29803090bc10: 2024-11-20T19:25:08,395 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:25:08,403 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732130708396"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130708396"}]},"ts":"1732130708396"} 2024-11-20T19:25:08,427 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:25:08,429 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:25:08,434 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130708430"}]},"ts":"1732130708430"} 2024-11-20T19:25:08,438 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-20T19:25:08,464 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d21e0da06747a4b3da8e29803090bc10, ASSIGN}] 2024-11-20T19:25:08,467 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=d21e0da06747a4b3da8e29803090bc10, ASSIGN 2024-11-20T19:25:08,469 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=d21e0da06747a4b3da8e29803090bc10, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:25:08,620 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d21e0da06747a4b3da8e29803090bc10, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:08,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure d21e0da06747a4b3da8e29803090bc10, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:08,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:08,802 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:25:08,802 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => d21e0da06747a4b3da8e29803090bc10, NAME => 'hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:08,803 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,803 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:08,803 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,803 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,810 INFO [StoreOpener-d21e0da06747a4b3da8e29803090bc10-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,820 INFO [StoreOpener-d21e0da06747a4b3da8e29803090bc10-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d21e0da06747a4b3da8e29803090bc10 columnFamilyName info 2024-11-20T19:25:08,820 DEBUG [StoreOpener-d21e0da06747a4b3da8e29803090bc10-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:08,823 INFO [StoreOpener-d21e0da06747a4b3da8e29803090bc10-1 {}] regionserver.HStore(327): Store=d21e0da06747a4b3da8e29803090bc10/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:08,828 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,833 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,845 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:25:08,867 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:08,869 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened d21e0da06747a4b3da8e29803090bc10; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74509363, jitterRate=0.11027602851390839}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-20T19:25:08,871 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for d21e0da06747a4b3da8e29803090bc10: 2024-11-20T19:25:08,875 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10., pid=6, masterSystemTime=1732130708790 2024-11-20T19:25:08,880 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:25:08,880 INFO [RS_OPEN_PRIORITY_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 
2024-11-20T19:25:08,881 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=d21e0da06747a4b3da8e29803090bc10, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:08,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-20T19:25:08,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure d21e0da06747a4b3da8e29803090bc10, server=db9c3a6c6492,35979,1732130703276 in 255 msec 2024-11-20T19:25:08,905 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:25:08,905 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130708905"}]},"ts":"1732130708905"} 2024-11-20T19:25:08,906 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-20T19:25:08,906 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=d21e0da06747a4b3da8e29803090bc10, ASSIGN in 432 msec 2024-11-20T19:25:08,913 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-20T19:25:08,960 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-20T19:25:08,962 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:25:08,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.4150 sec 2024-11-20T19:25:08,971 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:08,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-20T19:25:08,972 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:25:09,018 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-20T19:25:09,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T19:25:09,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 55 msec 2024-11-20T19:25:09,077 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-20T19:25:09,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-20T19:25:09,119 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 40 msec 2024-11-20T19:25:09,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-20T19:25:09,179 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-20T19:25:09,179 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.799sec 2024-11-20T19:25:09,181 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-20T19:25:09,183 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-20T19:25:09,184 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-20T19:25:09,185 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-20T19:25:09,185 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-20T19:25:09,186 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,36861,1732130702494-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-20T19:25:09,186 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,36861,1732130702494-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-20T19:25:09,218 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-20T19:25:09,220 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-20T19:25:09,220 INFO [master/db9c3a6c6492:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=db9c3a6c6492,36861,1732130702494-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
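[Editor's note] The two CreateNamespaceProcedure entries above (pid=7 for "default", pid=8 for "hbase") are the built-in namespaces the master creates during initialization. A user namespace would go through the same procedure when created from the client Admin API; the following is a minimal sketch only, assuming a reachable cluster and a hypothetical namespace name, not something taken from this log.

```java
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Runs on the master as a CreateNamespaceProcedure, like pid=7/8 in this log.
      // "test_ns" is a hypothetical name used only for illustration.
      admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
    }
  }
}
```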
2024-11-20T19:25:09,252 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-20T19:25:09,253 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-20T19:25:09,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:09,279 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-20T19:25:09,279 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-20T19:25:09,297 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:09,317 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53888, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:09,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=db9c3a6c6492,36861,1732130702494 2024-11-20T19:25:09,373 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=196, ProcessCount=11, AvailableMemoryMB=4642 2024-11-20T19:25:09,387 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:25:09,395 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:25:09,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
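[Editor's note] The WARN above flags ZKConnectionRegistry as deprecated and points to the client.rpcconnectionregistry section of the HBase book. A minimal sketch of switching a client to the RPC-based registry is shown below; the property names, class name, and host:port values are assumptions based on that reference section, not values read from this log, so verify them against the documentation for the HBase version in use.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcRegistryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key/value for selecting the RPC-based connection registry.
    conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    // Assumed bootstrap endpoints (hypothetical hosts, default master RPC port).
    conf.set("hbase.client.bootstrap.servers", "master1:16000,master2:16000");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Connection established without going through ZooKeeper for registry lookups.
      System.out.println("registry impl = "
          + connection.getConfiguration().get("hbase.client.registry.impl"));
    }
  }
}
```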
2024-11-20T19:25:09,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:09,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:09,418 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:25:09,418 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:09,425 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:25:09,426 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-20T19:25:09,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:09,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741837_1013 (size=963) 2024-11-20T19:25:09,478 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:25:09,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741838_1014 (size=53) 2024-11-20T19:25:09,495 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:09,495 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6aad06303ed006b601a1faa1a93ab5da, disabling compactions & flushes 2024-11-20T19:25:09,495 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:09,496 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:09,496 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. after waiting 0 ms 2024-11-20T19:25:09,496 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:09,496 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:09,496 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:09,499 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:25:09,501 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130709499"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130709499"}]},"ts":"1732130709499"} 2024-11-20T19:25:09,505 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
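[Editor's note] The CreateTableProcedure above (pid=9) is driven by the table descriptor printed in the HMaster entry: families A, B and C with VERSIONS => '1', BLOCKSIZE => 65536, and the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A rough client-side equivalent using the Admin API is sketched below; it is inferred from that descriptor, assumes default connection configuration, and omits the attributes the log shows at their defaults.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in TABLE_ATTRIBUTES in this log.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)         // VERSIONS => '1'
            .setBlocksize(64 * 1024)   // BLOCKSIZE => '65536 B (64KB)'
            .build();
        table.setColumnFamily(cf);
      }
      // The master executes this as CreateTableProcedure, like pid=9 above.
      admin.createTable(table.build());
    }
  }
}
```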
2024-11-20T19:25:09,516 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:25:09,516 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130709516"}]},"ts":"1732130709516"} 2024-11-20T19:25:09,528 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:25:09,545 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, ASSIGN}] 2024-11-20T19:25:09,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:09,548 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, ASSIGN 2024-11-20T19:25:09,550 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:25:09,701 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6aad06303ed006b601a1faa1a93ab5da, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:09,707 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:09,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:09,862 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:09,888 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:09,888 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:09,889 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,890 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:09,890 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,890 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,904 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,928 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:09,929 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6aad06303ed006b601a1faa1a93ab5da columnFamilyName A 2024-11-20T19:25:09,929 DEBUG [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:09,931 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.HStore(327): Store=6aad06303ed006b601a1faa1a93ab5da/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:09,931 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,938 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:09,938 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6aad06303ed006b601a1faa1a93ab5da columnFamilyName B 2024-11-20T19:25:09,939 DEBUG [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:09,942 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.HStore(327): Store=6aad06303ed006b601a1faa1a93ab5da/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:09,942 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,946 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:09,947 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6aad06303ed006b601a1faa1a93ab5da columnFamilyName C 2024-11-20T19:25:09,947 DEBUG [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:09,948 INFO [StoreOpener-6aad06303ed006b601a1faa1a93ab5da-1 {}] regionserver.HStore(327): Store=6aad06303ed006b601a1faa1a93ab5da/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:09,948 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:09,950 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,952 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,957 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:09,962 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:09,969 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:09,970 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6aad06303ed006b601a1faa1a93ab5da; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64237369, jitterRate=-0.042788609862327576}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:09,972 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:09,974 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., pid=11, masterSystemTime=1732130709862 2024-11-20T19:25:09,998 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:09,998 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
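[Editor's note] With region 6aad06303ed006b601a1faa1a93ab5da now open, the log further down shows the master handling "flush TestAcidGuarantees" as a FlushTableProcedure (pid=12). A client can request that flush through the Admin API; the sketch below is illustrative only and assumes default connection configuration.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // Flushes all column families of the table; on the master this shows up
      // as a FlushTableProcedure with per-region FlushRegionProcedure children,
      // like pid=12/13 later in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```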
2024-11-20T19:25:10,000 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6aad06303ed006b601a1faa1a93ab5da, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:10,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-20T19:25:10,014 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 in 300 msec 2024-11-20T19:25:10,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-20T19:25:10,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, ASSIGN in 467 msec 2024-11-20T19:25:10,020 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:25:10,021 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130710020"}]},"ts":"1732130710020"} 2024-11-20T19:25:10,026 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:25:10,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:10,091 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:25:10,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 680 msec 2024-11-20T19:25:10,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-20T19:25:10,567 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-20T19:25:10,572 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-20T19:25:10,617 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,621 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:10,653 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:10,657 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=MasterService, sasl=false 2024-11-20T19:25:10,675 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:25:10,691 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-20T19:25:10,711 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-20T19:25:10,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-20T19:25:10,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,777 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-20T19:25:10,815 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,818 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-20T19:25:10,833 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,837 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-20T19:25:10,860 DEBUG 
[Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,862 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-20T19:25:10,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,883 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-20T19:25:10,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,906 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-20T19:25:10,923 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:10,962 DEBUG [hconnection-0x7b5d524b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:10,971 DEBUG [hconnection-0x59c3390f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:10,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:10,986 DEBUG [hconnection-0x48af454-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:10,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-20T19:25:10,986 DEBUG [hconnection-0x25557d7e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:10,990 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:10,993 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:10,996 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:11,017 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53902, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,026 DEBUG [hconnection-0x17b3816e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:11,026 DEBUG [hconnection-0x134cb240-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:11,044 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,058 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,077 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,084 DEBUG [hconnection-0x6a7f891-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:11,117 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53940, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,121 DEBUG [hconnection-0x3391cff4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:11,129 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,136 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,153 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53960, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,160 DEBUG [hconnection-0x3fb41cfe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:11,181 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:11,181 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:11,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:11,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:25:11,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:11,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,231 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
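The entries above trace one round of the requested flush: the master stores FlushTableProcedure pid=12 for TestAcidGuarantees, spawns FlushRegionProcedure pid=13 and dispatches it to region server db9c3a6c6492,35979; the server declines with "NOT flushing ... as already flushing" because MemStoreFlusher.0 already has a flush of region 6aad06303ed006b601a1faa1a93ab5da in progress, so pid=13 fails with the IOException shown and the master re-dispatches it while the caller keeps polling ("Checking to see if procedure is done pid=12"). For orientation, the client-side call behind the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" line is presumably the standard Admin flush; the sketch below is a hypothetical reconstruction, not taken from the test source, and the class name, configuration and error handling are assumptions.

// Hypothetical reconstruction of the client call that the master records as
// "flush TestAcidGuarantees"; configuration details are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml; in the run above the ZK quorum would be 127.0.0.1:50476.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // The master runs this request as a FlushTableProcedure (pid=12 above) and
            // the caller polls for completion, which is what appears to produce the
            // repeated "Checking to see if procedure is done pid=12" lines.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}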
2024-11-20T19:25:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A
2024-11-20T19:25:11,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:11,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B
2024-11-20T19:25:11,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:11,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C
2024-11-20T19:25:11,286 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-20T19:25:11,400 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:25:11,401 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13
2024-11-20T19:25:11,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.
2024-11-20T19:25:11,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing
2024-11-20T19:25:11,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.
2024-11-20T19:25:11,421 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13
java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
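While the flush is in flight, MemStoreFlusher.0 has snapshotted all three stores (A, B, C) of region 6aad06303ed006b601a1faa1a93ab5da, and the entries that follow show the A and B store files being written while the concurrent writers are pushed back: mutations against test_row_0 are rejected with RegionTooBusyException once the region passes the 512.0 K memstore limit reported by the server. The sketch below shows what such a writer might look like; it is hypothetical, with only the row key and column taken from the log (test_row_0, A:col10) and everything else (class name, cell value, the explicit catch) an assumption for illustration.

// Hypothetical writer sketch; the HBase client normally retries RegionTooBusyException
// on its own, so the catch here is purely illustrative.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    static void writeOnce(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            try {
                table.put(put);
            } catch (IOException e) {
                // While the region's memstore is over its blocking limit the server
                // rejects the mutation with the RegionTooBusyException seen above
                // ("Over memstore limit=512.0 K"); depending on client retry settings
                // it reaches the caller directly or wrapped by the retrying caller.
                System.err.println("Write rejected while region is flushing: " + e);
            }
        }
    }
}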
2024-11-20T19:25:11,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/22180d8bee424eae9c2a3c769bb45bb1 is 50, key is test_row_0/A:col10/1732130711213/Put/seqid=0 2024-11-20T19:25:11,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130771466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130771472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130771490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130771499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130771501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741839_1015 (size=12001) 2024-11-20T19:25:11,585 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/22180d8bee424eae9c2a3c769bb45bb1 2024-11-20T19:25:11,588 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:11,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:11,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:11,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130771672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130771673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130771674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130771673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130771671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,783 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:11,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,810 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/0f3823c0d19d452cbb54e1f29b87a91d is 50, key is test_row_0/B:col10/1732130711213/Put/seqid=0 2024-11-20T19:25:11,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741840_1016 (size=12001) 2024-11-20T19:25:11,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/0f3823c0d19d452cbb54e1f29b87a91d 2024-11-20T19:25:11,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130771887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130771886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130771891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:11,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130771898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130771891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,946 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:11,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:11,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:11,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:11,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:11,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/b79f78470eb544058f7fd776bfc7d0c3 is 50, key is test_row_0/C:col10/1732130711213/Put/seqid=0 2024-11-20T19:25:12,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741841_1017 (size=12001) 2024-11-20T19:25:12,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/b79f78470eb544058f7fd776bfc7d0c3 2024-11-20T19:25:12,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/22180d8bee424eae9c2a3c769bb45bb1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1 2024-11-20T19:25:12,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:25:12,075 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T19:25:12,077 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): 
Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-20T19:25:12,078 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-20T19:25:12,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/0f3823c0d19d452cbb54e1f29b87a91d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/0f3823c0d19d452cbb54e1f29b87a91d 2024-11-20T19:25:12,104 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:12,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:12,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:12,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:12,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/0f3823c0d19d452cbb54e1f29b87a91d, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:25:12,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/b79f78470eb544058f7fd776bfc7d0c3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b79f78470eb544058f7fd776bfc7d0c3 2024-11-20T19:25:12,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b79f78470eb544058f7fd776bfc7d0c3, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:25:12,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 6aad06303ed006b601a1faa1a93ab5da in 947ms, sequenceid=15, compaction requested=false 2024-11-20T19:25:12,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:12,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:12,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:25:12,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:12,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,223 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:12,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:12,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:12,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/f20a57254d4e438a9cee5b83a440e03e is 50, key is test_row_0/A:col10/1732130711443/Put/seqid=0 2024-11-20T19:25:12,266 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:12,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:12,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:12,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130772259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130772268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130772269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130772277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130772282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741842_1018 (size=14341) 2024-11-20T19:25:12,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130772386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130772392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130772394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130772405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:12,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:12,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:12,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130772448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130772605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,616 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130772607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:12,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:12,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:12,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:12,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130772609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130772629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130772663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/f20a57254d4e438a9cee5b83a440e03e 2024-11-20T19:25:12,774 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:12,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:12,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a24a33034ed8478f8303247a96f61960 is 50, key is test_row_0/B:col10/1732130711443/Put/seqid=0 2024-11-20T19:25:12,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741843_1019 (size=12001) 2024-11-20T19:25:12,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a24a33034ed8478f8303247a96f61960 2024-11-20T19:25:12,863 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:25:12,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/0831a4766f7940a59b87e488b702d465 is 50, key is test_row_0/C:col10/1732130711443/Put/seqid=0 2024-11-20T19:25:12,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,931 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130772922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:12,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:12,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:12,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130772922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130772939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130772941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:12,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130772981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:12,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741844_1020 (size=12001) 2024-11-20T19:25:12,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/0831a4766f7940a59b87e488b702d465 2024-11-20T19:25:13,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/f20a57254d4e438a9cee5b83a440e03e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/f20a57254d4e438a9cee5b83a440e03e 2024-11-20T19:25:13,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/f20a57254d4e438a9cee5b83a440e03e, entries=200, sequenceid=40, filesize=14.0 K 2024-11-20T19:25:13,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a24a33034ed8478f8303247a96f61960 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a24a33034ed8478f8303247a96f61960 2024-11-20T19:25:13,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a24a33034ed8478f8303247a96f61960, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:25:13,060 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/0831a4766f7940a59b87e488b702d465 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0831a4766f7940a59b87e488b702d465 2024-11-20T19:25:13,091 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-20T19:25:13,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:13,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0831a4766f7940a59b87e488b702d465, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:25:13,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:13,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:13,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:13,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6aad06303ed006b601a1faa1a93ab5da in 875ms, sequenceid=40, compaction requested=false 2024-11-20T19:25:13,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:13,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:13,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:13,152 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-20T19:25:13,152 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:13,154 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-20T19:25:13,154 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:13,156 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T19:25:13,156 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-20T19:25:13,156 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-20T19:25:13,156 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:13,158 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-20T19:25:13,158 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-20T19:25:13,249 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 
2024-11-20T19:25:13,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:13,251 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:13,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:13,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:13,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:13,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/6db46912df1d4cef9a90fa0a5e5ff18e is 50, key is test_row_0/A:col10/1732130712275/Put/seqid=0 2024-11-20T19:25:13,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741845_1021 (size=12001) 2024-11-20T19:25:13,375 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/6db46912df1d4cef9a90fa0a5e5ff18e 2024-11-20T19:25:13,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/ebcc4aa49e7e406cadf44145deadf29a is 50, key is test_row_0/B:col10/1732130712275/Put/seqid=0 2024-11-20T19:25:13,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
as already flushing 2024-11-20T19:25:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:13,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741846_1022 (size=12001) 2024-11-20T19:25:13,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130773553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130773562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130773562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130773582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130773582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130773671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130773676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130773685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130773704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130773700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,896 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/ebcc4aa49e7e406cadf44145deadf29a 2024-11-20T19:25:13,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130773891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130773892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130773896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130773925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:13,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130773925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:13,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/3c8bf4ddc1da446d8bcb2190b320fd45 is 50, key is test_row_0/C:col10/1732130712275/Put/seqid=0 2024-11-20T19:25:14,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741847_1023 (size=12001) 2024-11-20T19:25:14,047 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/3c8bf4ddc1da446d8bcb2190b320fd45 2024-11-20T19:25:14,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/6db46912df1d4cef9a90fa0a5e5ff18e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/6db46912df1d4cef9a90fa0a5e5ff18e 2024-11-20T19:25:14,077 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/6db46912df1d4cef9a90fa0a5e5ff18e, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:25:14,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/ebcc4aa49e7e406cadf44145deadf29a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebcc4aa49e7e406cadf44145deadf29a 2024-11-20T19:25:14,106 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebcc4aa49e7e406cadf44145deadf29a, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:25:14,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/3c8bf4ddc1da446d8bcb2190b320fd45 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/3c8bf4ddc1da446d8bcb2190b320fd45 2024-11-20T19:25:14,148 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/3c8bf4ddc1da446d8bcb2190b320fd45, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:25:14,150 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6aad06303ed006b601a1faa1a93ab5da in 900ms, sequenceid=51, compaction requested=true 2024-11-20T19:25:14,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:14,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:14,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-20T19:25:14,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-20T19:25:14,158 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-20T19:25:14,158 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1570 sec 2024-11-20T19:25:14,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 3.1800 sec 2024-11-20T19:25:14,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T19:25:14,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:14,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:14,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:14,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:14,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:14,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:14,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:14,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/b6d5ee6f291e421280dc829aef7c4228 is 50, key is test_row_0/A:col10/1732130713562/Put/seqid=0 2024-11-20T19:25:14,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130774244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130774247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130774250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130774256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130774259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741848_1024 (size=12001) 2024-11-20T19:25:14,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130774360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130774363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130774373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130774376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130774377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130774567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130774581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130774580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130774583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130774584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/b6d5ee6f291e421280dc829aef7c4228 2024-11-20T19:25:14,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/ebbeda9c43a2459eae601f5f27731bd5 is 50, key is test_row_0/B:col10/1732130713562/Put/seqid=0 2024-11-20T19:25:14,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741849_1025 (size=12001) 2024-11-20T19:25:14,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130774874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130774889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130774889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130774889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:14,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:14,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130774890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-20T19:25:15,119 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-20T19:25:15,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:15,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-20T19:25:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:15,146 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:15,152 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:15,152 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:15,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=80 
(bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/ebbeda9c43a2459eae601f5f27731bd5 2024-11-20T19:25:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:15,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/c26b68ce67944b9089384198c9b94144 is 50, key is test_row_0/C:col10/1732130713562/Put/seqid=0 2024-11-20T19:25:15,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:15,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:15,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:15,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741850_1026 (size=12001) 2024-11-20T19:25:15,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/c26b68ce67944b9089384198c9b94144 2024-11-20T19:25:15,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/b6d5ee6f291e421280dc829aef7c4228 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b6d5ee6f291e421280dc829aef7c4228 2024-11-20T19:25:15,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b6d5ee6f291e421280dc829aef7c4228, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:25:15,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/ebbeda9c43a2459eae601f5f27731bd5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebbeda9c43a2459eae601f5f27731bd5 2024-11-20T19:25:15,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebbeda9c43a2459eae601f5f27731bd5, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:25:15,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/c26b68ce67944b9089384198c9b94144 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/c26b68ce67944b9089384198c9b94144 2024-11-20T19:25:15,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/c26b68ce67944b9089384198c9b94144, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:25:15,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=40.25 KB/41220 for 6aad06303ed006b601a1faa1a93ab5da in 1198ms, sequenceid=80, compaction requested=true 2024-11-20T19:25:15,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:15,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-20T19:25:15,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:15,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:15,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:15,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:15,429 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:15,429 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:15,430 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:15,435 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:15,438 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:15,438 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:15,439 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/0f3823c0d19d452cbb54e1f29b87a91d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a24a33034ed8478f8303247a96f61960, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebcc4aa49e7e406cadf44145deadf29a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebbeda9c43a2459eae601f5f27731bd5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=46.9 K 2024-11-20T19:25:15,439 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:15,439 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:15,439 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,440 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/f20a57254d4e438a9cee5b83a440e03e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/6db46912df1d4cef9a90fa0a5e5ff18e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b6d5ee6f291e421280dc829aef7c4228] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=49.2 K 2024-11-20T19:25:15,445 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22180d8bee424eae9c2a3c769bb45bb1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130711180 2024-11-20T19:25:15,446 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f3823c0d19d452cbb54e1f29b87a91d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130711180 2024-11-20T19:25:15,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:15,448 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] 
compactions.Compactor(224): Compacting a24a33034ed8478f8303247a96f61960, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130711443 2024-11-20T19:25:15,449 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f20a57254d4e438a9cee5b83a440e03e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130711443 2024-11-20T19:25:15,450 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ebcc4aa49e7e406cadf44145deadf29a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130712260 2024-11-20T19:25:15,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:15,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:15,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:15,451 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ebbeda9c43a2459eae601f5f27731bd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130713562 2024-11-20T19:25:15,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:15,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:15,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,455 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6db46912df1d4cef9a90fa0a5e5ff18e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130712260 2024-11-20T19:25:15,463 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6d5ee6f291e421280dc829aef7c4228, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130713562 2024-11-20T19:25:15,466 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:15,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:15,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:15,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:15,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/b66736ddd76c4f9b9e332d30e66b7514 is 50, key is test_row_0/A:col10/1732130715446/Put/seqid=0 2024-11-20T19:25:15,571 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#14 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:15,574 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#13 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:15,575 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/da89db61df6b4119bbd0cdc5dc435fe6 is 50, key is test_row_0/A:col10/1732130713562/Put/seqid=0 2024-11-20T19:25:15,584 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/8e44786668b84f5d8255591fc95003b8 is 50, key is test_row_0/B:col10/1732130713562/Put/seqid=0 2024-11-20T19:25:15,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741851_1027 (size=12001) 2024-11-20T19:25:15,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/b66736ddd76c4f9b9e332d30e66b7514 2024-11-20T19:25:15,634 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:15,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:15,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741852_1028 (size=12139) 2024-11-20T19:25:15,661 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/da89db61df6b4119bbd0cdc5dc435fe6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/da89db61df6b4119bbd0cdc5dc435fe6 2024-11-20T19:25:15,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741853_1029 (size=12139) 2024-11-20T19:25:15,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/4377eb601e854ad6ba02b219e0b1bc88 is 50, key is test_row_0/B:col10/1732130715446/Put/seqid=0 2024-11-20T19:25:15,701 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/8e44786668b84f5d8255591fc95003b8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8e44786668b84f5d8255591fc95003b8 2024-11-20T19:25:15,703 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into da89db61df6b4119bbd0cdc5dc435fe6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:15,704 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:15,704 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=12, startTime=1732130715428; duration=0sec 2024-11-20T19:25:15,709 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:15,710 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:15,710 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:15,716 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into 8e44786668b84f5d8255591fc95003b8(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:15,716 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:15,716 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=12, startTime=1732130715428; duration=0sec 2024-11-20T19:25:15,717 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:15,717 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:15,719 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:15,720 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:15,720 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:15,720 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b79f78470eb544058f7fd776bfc7d0c3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0831a4766f7940a59b87e488b702d465, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/3c8bf4ddc1da446d8bcb2190b320fd45, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/c26b68ce67944b9089384198c9b94144] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=46.9 K 2024-11-20T19:25:15,722 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b79f78470eb544058f7fd776bfc7d0c3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130711180 2024-11-20T19:25:15,725 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0831a4766f7940a59b87e488b702d465, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130711443 2024-11-20T19:25:15,728 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c8bf4ddc1da446d8bcb2190b320fd45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130712260 2024-11-20T19:25:15,732 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c26b68ce67944b9089384198c9b94144, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130713562 2024-11-20T19:25:15,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741854_1030 (size=12001) 2024-11-20T19:25:15,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:15,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/4377eb601e854ad6ba02b219e0b1bc88 2024-11-20T19:25:15,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130775686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130775743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,791 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:15,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:15,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:15,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/2d5b2a92bbd44986bded953ef221511a is 50, key is test_row_0/C:col10/1732130715446/Put/seqid=0 2024-11-20T19:25:15,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130775752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130775758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,829 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#17 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:15,830 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/e04c465e3df34a028e72beedcb454471 is 50, key is test_row_0/C:col10/1732130713562/Put/seqid=0 2024-11-20T19:25:15,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130775776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741855_1031 (size=12001) 2024-11-20T19:25:15,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/2d5b2a92bbd44986bded953ef221511a 2024-11-20T19:25:15,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/b66736ddd76c4f9b9e332d30e66b7514 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b66736ddd76c4f9b9e332d30e66b7514 2024-11-20T19:25:15,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130775863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130775865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b66736ddd76c4f9b9e332d30e66b7514, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:25:15,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/4377eb601e854ad6ba02b219e0b1bc88 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4377eb601e854ad6ba02b219e0b1bc88 2024-11-20T19:25:15,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741856_1032 (size=12139) 2024-11-20T19:25:15,928 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/e04c465e3df34a028e72beedcb454471 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/e04c465e3df34a028e72beedcb454471 2024-11-20T19:25:15,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4377eb601e854ad6ba02b219e0b1bc88, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:25:15,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/2d5b2a92bbd44986bded953ef221511a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/2d5b2a92bbd44986bded953ef221511a 2024-11-20T19:25:15,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:15,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130775921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:15,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:15,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:15,948 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,949 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into e04c465e3df34a028e72beedcb454471(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:15,949 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:15,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:15,950 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=12, startTime=1732130715429; duration=0sec 2024-11-20T19:25:15,950 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:15,950 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:15,959 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/2d5b2a92bbd44986bded953ef221511a, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:25:15,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6aad06303ed006b601a1faa1a93ab5da in 520ms, sequenceid=91, compaction requested=false 2024-11-20T19:25:15,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:15,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:25:15,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:15,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:15,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/bea8a37abb8a4673a7cb85dbc4e35aa1 is 50, key is test_row_0/A:col10/1732130715668/Put/seqid=0 2024-11-20T19:25:16,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130776036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130776041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741857_1033 (size=12001) 2024-11-20T19:25:16,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:16,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:16,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130776102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130776102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130776144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130776151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130776167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:16,257 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:16,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:16,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,258 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
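Note: pid=15 in the entries above is a master-side flush procedure. RSProcedureDispatcher sends a FlushRegionCallable to the region server, the server answers "Unable to complete flush" because the region is already flushing a memstore snapshot, and the master re-dispatches the callable until it succeeds, which is why the same pid keeps reappearing. A flush of this kind can be requested through the Admin API; the sketch below is a hypothetical illustration, not the code this test actually ran.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical illustration: ask the master to flush the test table. The master runs a
// flush procedure and dispatches FlushRegionCallable to each hosting region server,
// retrying regions that report an in-progress flush (the pid=15 pattern in the log).
public final class FlushTestTable {
    public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection();
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}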
2024-11-20T19:25:16,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130776349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130776394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,413 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
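Note: each rejected write above appears twice, once as the WARN from HRegion(5069) and once as the ipc.CallRunner DEBUG carrying the callId, request size, and deadline returned to the caller. The HBase client retries RegionTooBusyException internally and only surfaces it once its retries or operation timeout are exhausted, so a writer under this kind of memstore pressure mainly needs generous retry settings. The snippet below is a minimal sketch with assumed values, reusing the row and column names seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch with assumed values: give the client enough retries and a long enough
// operation timeout to ride out RegionTooBusyException while the region flushes.
public final class PatientWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 35);
        conf.setLong("hbase.client.operation.timeout", 120_000L);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put); // retried internally; the exception propagates only after retries are exhausted
        }
    }
}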
2024-11-20T19:25:16,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130776417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130776420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/bea8a37abb8a4673a7cb85dbc4e35aa1 2024-11-20T19:25:16,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130776480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6e65cbebfdf04bc28448d53afe1a3fb5 is 50, key is test_row_0/B:col10/1732130715668/Put/seqid=0 2024-11-20T19:25:16,568 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:16,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:16,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741858_1034 (size=12001) 2024-11-20T19:25:16,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6e65cbebfdf04bc28448d53afe1a3fb5 2024-11-20T19:25:16,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/63b0de508eaa4adb93ec808979afcb97 is 50, key is test_row_0/C:col10/1732130715668/Put/seqid=0 2024-11-20T19:25:16,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741859_1035 (size=12001) 2024-11-20T19:25:16,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=122 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/63b0de508eaa4adb93ec808979afcb97 2024-11-20T19:25:16,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130776663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/bea8a37abb8a4673a7cb85dbc4e35aa1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bea8a37abb8a4673a7cb85dbc4e35aa1 2024-11-20T19:25:16,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bea8a37abb8a4673a7cb85dbc4e35aa1, entries=150, sequenceid=122, filesize=11.7 K 2024-11-20T19:25:16,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6e65cbebfdf04bc28448d53afe1a3fb5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6e65cbebfdf04bc28448d53afe1a3fb5 2024-11-20T19:25:16,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6e65cbebfdf04bc28448d53afe1a3fb5, entries=150, sequenceid=122, filesize=11.7 K 2024-11-20T19:25:16,722 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/63b0de508eaa4adb93ec808979afcb97 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/63b0de508eaa4adb93ec808979afcb97 2024-11-20T19:25:16,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:16,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130776709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,726 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:16,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:16,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:16,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/63b0de508eaa4adb93ec808979afcb97, entries=150, sequenceid=122, filesize=11.7 K 2024-11-20T19:25:16,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 6aad06303ed006b601a1faa1a93ab5da in 767ms, sequenceid=122, compaction requested=true 2024-11-20T19:25:16,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:16,756 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:16,756 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:16,759 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:16,759 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 
2024-11-20T19:25:16,760 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,760 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/da89db61df6b4119bbd0cdc5dc435fe6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b66736ddd76c4f9b9e332d30e66b7514, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bea8a37abb8a4673a7cb85dbc4e35aa1] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=35.3 K 2024-11-20T19:25:16,761 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:16,761 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:16,761 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,761 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8e44786668b84f5d8255591fc95003b8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4377eb601e854ad6ba02b219e0b1bc88, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6e65cbebfdf04bc28448d53afe1a3fb5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=35.3 K 2024-11-20T19:25:16,762 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e44786668b84f5d8255591fc95003b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130713562 2024-11-20T19:25:16,762 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting da89db61df6b4119bbd0cdc5dc435fe6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130713562 2024-11-20T19:25:16,764 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b66736ddd76c4f9b9e332d30e66b7514, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130714246 2024-11-20T19:25:16,764 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 
4377eb601e854ad6ba02b219e0b1bc88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130714246 2024-11-20T19:25:16,765 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting bea8a37abb8a4673a7cb85dbc4e35aa1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732130715668 2024-11-20T19:25:16,766 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e65cbebfdf04bc28448d53afe1a3fb5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732130715668 2024-11-20T19:25:16,802 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#21 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:16,803 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/efa284dc78094e0b845de0ac8ede097b is 50, key is test_row_0/B:col10/1732130715668/Put/seqid=0 2024-11-20T19:25:16,819 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:16,820 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/01399b0fba2f47c49755b1227d75e6ee is 50, key is test_row_0/A:col10/1732130715668/Put/seqid=0 2024-11-20T19:25:16,880 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:16,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-20T19:25:16,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:16,890 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:25:16,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:16,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:16,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:16,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:16,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741860_1036 (size=12241) 2024-11-20T19:25:16,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741861_1037 (size=12241) 2024-11-20T19:25:16,931 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/01399b0fba2f47c49755b1227d75e6ee as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/01399b0fba2f47c49755b1227d75e6ee 2024-11-20T19:25:16,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8 is 50, key is test_row_0/A:col10/1732130716024/Put/seqid=0 2024-11-20T19:25:16,948 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into 01399b0fba2f47c49755b1227d75e6ee(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:16,948 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:16,948 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=13, startTime=1732130716756; duration=0sec 2024-11-20T19:25:16,949 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:16,949 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:16,949 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:16,951 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:16,951 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:16,952 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:16,952 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/e04c465e3df34a028e72beedcb454471, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/2d5b2a92bbd44986bded953ef221511a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/63b0de508eaa4adb93ec808979afcb97] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=35.3 K 2024-11-20T19:25:16,953 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e04c465e3df34a028e72beedcb454471, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130713562 2024-11-20T19:25:16,954 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d5b2a92bbd44986bded953ef221511a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130714246 2024-11-20T19:25:16,955 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63b0de508eaa4adb93ec808979afcb97, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732130715668 2024-11-20T19:25:16,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): 
Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:16,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:17,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741862_1038 (size=12051) 2024-11-20T19:25:17,005 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8 2024-11-20T19:25:17,006 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#24 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:17,007 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/42c825f9438c44c684779d0e0b2112f0 is 50, key is test_row_0/C:col10/1732130715668/Put/seqid=0 2024-11-20T19:25:17,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/90c3fc467a6f42ce90d10a765ba689ee is 50, key is test_row_0/B:col10/1732130716024/Put/seqid=0 2024-11-20T19:25:17,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741863_1039 (size=12241) 2024-11-20T19:25:17,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130777088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130777089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,104 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/42c825f9438c44c684779d0e0b2112f0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/42c825f9438c44c684779d0e0b2112f0 2024-11-20T19:25:17,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130777091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741864_1040 (size=12051) 2024-11-20T19:25:17,122 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into 42c825f9438c44c684779d0e0b2112f0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:17,122 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:17,122 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=13, startTime=1732130716756; duration=0sec 2024-11-20T19:25:17,123 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:17,123 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:17,123 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/90c3fc467a6f42ce90d10a765ba689ee 2024-11-20T19:25:17,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f18c0867ca26447283c5fb342fbf0d12 is 50, key is test_row_0/C:col10/1732130716024/Put/seqid=0 2024-11-20T19:25:17,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741865_1041 (size=12051) 2024-11-20T19:25:17,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130777186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,202 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f18c0867ca26447283c5fb342fbf0d12 2024-11-20T19:25:17,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130777205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8 2024-11-20T19:25:17,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130777214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130777213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130777227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,237 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T19:25:17,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/90c3fc467a6f42ce90d10a765ba689ee as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/90c3fc467a6f42ce90d10a765ba689ee 2024-11-20T19:25:17,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:17,255 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/90c3fc467a6f42ce90d10a765ba689ee, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T19:25:17,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f18c0867ca26447283c5fb342fbf0d12 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f18c0867ca26447283c5fb342fbf0d12 2024-11-20T19:25:17,274 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f18c0867ca26447283c5fb342fbf0d12, entries=150, sequenceid=130, filesize=11.8 K 2024-11-20T19:25:17,276 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 6aad06303ed006b601a1faa1a93ab5da in 386ms, sequenceid=130, compaction requested=false 2024-11-20T19:25:17,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:17,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:17,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-20T19:25:17,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-20T19:25:17,284 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-20T19:25:17,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1270 sec 2024-11-20T19:25:17,289 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 2.1510 sec 2024-11-20T19:25:17,306 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/efa284dc78094e0b845de0ac8ede097b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa284dc78094e0b845de0ac8ede097b 2024-11-20T19:25:17,325 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into efa284dc78094e0b845de0ac8ede097b(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:17,325 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:17,325 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=13, startTime=1732130716756; duration=0sec 2024-11-20T19:25:17,326 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:17,326 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:17,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:17,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-20T19:25:17,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:17,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:17,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:17,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:17,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:17,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:17,479 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/67affd162c76432f9083a95564243ae1 is 50, key is test_row_0/A:col10/1732130717424/Put/seqid=0 2024-11-20T19:25:17,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130777484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130777489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130777476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741866_1042 (size=12151) 2024-11-20T19:25:17,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/67affd162c76432f9083a95564243ae1 2024-11-20T19:25:17,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a0754508617141dda2fbf8d19eea3471 is 50, key is test_row_0/B:col10/1732130717424/Put/seqid=0 2024-11-20T19:25:17,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130777604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130777607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130777607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741867_1043 (size=12151) 2024-11-20T19:25:17,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a0754508617141dda2fbf8d19eea3471 2024-11-20T19:25:17,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f921390337fc4f2a99bfed868907987d is 50, key is test_row_0/C:col10/1732130717424/Put/seqid=0 2024-11-20T19:25:17,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741868_1044 (size=12151) 2024-11-20T19:25:17,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130777812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130777814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:17,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130777820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f921390337fc4f2a99bfed868907987d 2024-11-20T19:25:18,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/67affd162c76432f9083a95564243ae1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/67affd162c76432f9083a95564243ae1 2024-11-20T19:25:18,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130778122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130778123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/67affd162c76432f9083a95564243ae1, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T19:25:18,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130778138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a0754508617141dda2fbf8d19eea3471 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a0754508617141dda2fbf8d19eea3471 2024-11-20T19:25:18,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a0754508617141dda2fbf8d19eea3471, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T19:25:18,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f921390337fc4f2a99bfed868907987d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f921390337fc4f2a99bfed868907987d 2024-11-20T19:25:18,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f921390337fc4f2a99bfed868907987d, entries=150, sequenceid=162, filesize=11.9 K 2024-11-20T19:25:18,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 6aad06303ed006b601a1faa1a93ab5da in 
764ms, sequenceid=162, compaction requested=true 2024-11-20T19:25:18,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:18,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:18,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:18,192 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:18,193 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:18,193 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:18,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:18,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:18,195 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:18,195 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:18,195 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:18,195 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:18,195 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:18,196 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa284dc78094e0b845de0ac8ede097b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/90c3fc467a6f42ce90d10a765ba689ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a0754508617141dda2fbf8d19eea3471] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=35.6 K 2024-11-20T19:25:18,196 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:18,196 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/01399b0fba2f47c49755b1227d75e6ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/67affd162c76432f9083a95564243ae1] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=35.6 K 2024-11-20T19:25:18,196 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting efa284dc78094e0b845de0ac8ede097b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732130715668 2024-11-20T19:25:18,197 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 90c3fc467a6f42ce90d10a765ba689ee, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732130716004 2024-11-20T19:25:18,197 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01399b0fba2f47c49755b1227d75e6ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732130715668 2024-11-20T19:25:18,199 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a0754508617141dda2fbf8d19eea3471, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732130717073 2024-11-20T19:25:18,199 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec5cbffc4cfe4970bb2fcae30d3ed8c8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732130716004 2024-11-20T19:25:18,200 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67affd162c76432f9083a95564243ae1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732130717073 
2024-11-20T19:25:18,229 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#30 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:18,230 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/233365209b8c435392e6895c7cd2b409 is 50, key is test_row_0/A:col10/1732130717424/Put/seqid=0 2024-11-20T19:25:18,235 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#31 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:18,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:18,236 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a67b3d5dd29d4b549fbdfc30a9841a2f is 50, key is test_row_0/B:col10/1732130717424/Put/seqid=0 2024-11-20T19:25:18,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:18,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:18,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:18,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:18,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:18,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:18,238 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:18,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/aef16aaf336c4e19a0c4f4dee3b816af is 50, key is test_row_0/A:col10/1732130718227/Put/seqid=0 2024-11-20T19:25:18,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741869_1045 (size=12493) 2024-11-20T19:25:18,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741870_1046 (size=12493) 2024-11-20T19:25:18,345 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/233365209b8c435392e6895c7cd2b409 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/233365209b8c435392e6895c7cd2b409 2024-11-20T19:25:18,346 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a67b3d5dd29d4b549fbdfc30a9841a2f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a67b3d5dd29d4b549fbdfc30a9841a2f 2024-11-20T19:25:18,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741871_1047 (size=14541) 2024-11-20T19:25:18,452 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into 233365209b8c435392e6895c7cd2b409(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:18,452 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into a67b3d5dd29d4b549fbdfc30a9841a2f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:18,452 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:18,452 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:18,452 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=13, startTime=1732130718192; duration=0sec 2024-11-20T19:25:18,452 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=13, startTime=1732130718192; duration=0sec 2024-11-20T19:25:18,452 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:18,452 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:18,452 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:18,453 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:18,453 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:18,455 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:18,455 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:18,455 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:18,455 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/42c825f9438c44c684779d0e0b2112f0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f18c0867ca26447283c5fb342fbf0d12, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f921390337fc4f2a99bfed868907987d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=35.6 K 2024-11-20T19:25:18,456 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42c825f9438c44c684779d0e0b2112f0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=122, earliestPutTs=1732130715668 2024-11-20T19:25:18,457 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f18c0867ca26447283c5fb342fbf0d12, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732130716004 2024-11-20T19:25:18,458 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f921390337fc4f2a99bfed868907987d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732130717073 2024-11-20T19:25:18,484 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#33 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:18,485 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/86bf254f55d342eb8a6b39b9687152ef is 50, key is test_row_0/C:col10/1732130717424/Put/seqid=0 2024-11-20T19:25:18,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741872_1048 (size=12493) 2024-11-20T19:25:18,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130778526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,540 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/86bf254f55d342eb8a6b39b9687152ef as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/86bf254f55d342eb8a6b39b9687152ef 2024-11-20T19:25:18,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130778533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,553 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into 86bf254f55d342eb8a6b39b9687152ef(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:18,553 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:18,553 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=13, startTime=1732130718193; duration=0sec 2024-11-20T19:25:18,553 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:18,553 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:18,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130778632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130778632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130778640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130778646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130778645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/aef16aaf336c4e19a0c4f4dee3b816af 2024-11-20T19:25:18,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130778847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:18,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130778854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:18,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/09223248a6014eaf95ffe0aee435cd7d is 50, key is test_row_0/B:col10/1732130718227/Put/seqid=0 2024-11-20T19:25:18,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741873_1049 (size=12151) 2024-11-20T19:25:19,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130779158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130779162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-20T19:25:19,256 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-20T19:25:19,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:19,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-20T19:25:19,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:19,263 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:19,265 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:19,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:19,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/09223248a6014eaf95ffe0aee435cd7d 2024-11-20T19:25:19,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/9c7dfb291861485daca55953fcc2aada is 50, key is test_row_0/C:col10/1732130718227/Put/seqid=0 2024-11-20T19:25:19,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:19,419 
DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:19,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:19,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:19,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741874_1050 (size=12151) 2024-11-20T19:25:19,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:19,575 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:19,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:19,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:19,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130779643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130779647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130779657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130779665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:19,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130779670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,732 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:19,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/9c7dfb291861485daca55953fcc2aada 2024-11-20T19:25:19,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/aef16aaf336c4e19a0c4f4dee3b816af as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/aef16aaf336c4e19a0c4f4dee3b816af 2024-11-20T19:25:19,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:19,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/aef16aaf336c4e19a0c4f4dee3b816af, entries=200, sequenceid=173, filesize=14.2 K 2024-11-20T19:25:19,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/09223248a6014eaf95ffe0aee435cd7d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/09223248a6014eaf95ffe0aee435cd7d 2024-11-20T19:25:19,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:19,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:19,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:19,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/09223248a6014eaf95ffe0aee435cd7d, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T19:25:19,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/9c7dfb291861485daca55953fcc2aada as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/9c7dfb291861485daca55953fcc2aada 2024-11-20T19:25:19,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/9c7dfb291861485daca55953fcc2aada, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T19:25:19,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6aad06303ed006b601a1faa1a93ab5da in 1698ms, sequenceid=173, compaction requested=false 2024-11-20T19:25:19,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:20,046 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:20,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-20T19:25:20,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:20,052 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:20,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:20,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:20,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:20,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:20,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:20,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:20,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/3d9eca8c364a43e0a40098a4f370c534 is 50, key is test_row_0/A:col10/1732130718523/Put/seqid=0 2024-11-20T19:25:20,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741875_1051 (size=12151) 2024-11-20T19:25:20,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:20,532 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/3d9eca8c364a43e0a40098a4f370c534 2024-11-20T19:25:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a2b3e025809b49699ce90c6e4560bdbb is 50, key is test_row_0/B:col10/1732130718523/Put/seqid=0 2024-11-20T19:25:20,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741876_1052 (size=12151) 2024-11-20T19:25:20,625 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a2b3e025809b49699ce90c6e4560bdbb 2024-11-20T19:25:20,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/7fd49b0c099b406b98498afbcbc9f805 is 50, key is test_row_0/C:col10/1732130718523/Put/seqid=0 2024-11-20T19:25:20,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:20,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:20,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741877_1053 (size=12151) 2024-11-20T19:25:20,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130780727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:20,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130780732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:20,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130780835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:20,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:20,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130780838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130781045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130781046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,097 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/7fd49b0c099b406b98498afbcbc9f805 2024-11-20T19:25:21,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/3d9eca8c364a43e0a40098a4f370c534 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/3d9eca8c364a43e0a40098a4f370c534 2024-11-20T19:25:21,123 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/3d9eca8c364a43e0a40098a4f370c534, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T19:25:21,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a2b3e025809b49699ce90c6e4560bdbb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a2b3e025809b49699ce90c6e4560bdbb 2024-11-20T19:25:21,157 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a2b3e025809b49699ce90c6e4560bdbb, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T19:25:21,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/7fd49b0c099b406b98498afbcbc9f805 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7fd49b0c099b406b98498afbcbc9f805 2024-11-20T19:25:21,176 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7fd49b0c099b406b98498afbcbc9f805, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T19:25:21,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6aad06303ed006b601a1faa1a93ab5da in 1125ms, sequenceid=201, compaction requested=true 2024-11-20T19:25:21,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:21,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-20T19:25:21,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-20T19:25:21,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-20T19:25:21,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9150 sec 2024-11-20T19:25:21,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.9250 sec 2024-11-20T19:25:21,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:21,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:21,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:21,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:21,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:21,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:21,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:21,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:21,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-20T19:25:21,381 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-20T19:25:21,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:21,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-20T19:25:21,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:21,396 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:21,397 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:21,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:21,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/c88f0032495d46168d33c141e3b06ed8 is 50, key is test_row_0/A:col10/1732130720725/Put/seqid=0 2024-11-20T19:25:21,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130781462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130781463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741878_1054 (size=14541) 2024-11-20T19:25:21,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:21,552 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:21,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,554 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130781579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130781578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130781653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,658 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:21,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130781677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,683 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:21,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130781678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,687 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4212 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:21,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:21,726 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:21,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:21,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,728 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130781793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130781793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/c88f0032495d46168d33c141e3b06ed8 2024-11-20T19:25:21,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:21,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:21,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:21,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:21,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:21,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/d6e89f21aa8c46f8924c007e78865034 is 50, key is test_row_0/B:col10/1732130720725/Put/seqid=0 2024-11-20T19:25:21,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741879_1055 (size=12151) 2024-11-20T19:25:22,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:22,037 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:22,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130782100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130782100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:22,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:22,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
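
The pid=19 failures above all follow the same pattern: the region server rejects the remote FlushRegionCallable because the region is already in the middle of a flush ("NOT flushing ... as already flushing"), and the master re-dispatches the procedure until the in-flight flush finishes. A minimal sketch of driving the same table flush through the public Admin API (assuming an HBase 2.x client on the classpath and default client configuration; the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch only: Admin.flush() asks the master to run a flush-region
    // procedure like pid=19 in this log. If the region is still busy with an
    // earlier flush, the remote callable fails and the master retries it,
    // which is the loop recorded above.
    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
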
2024-11-20T19:25:22,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/d6e89f21aa8c46f8924c007e78865034 2024-11-20T19:25:22,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/747d36516f1e44c5b0ea9e9d6eb04721 is 50, key is test_row_0/C:col10/1732130720725/Put/seqid=0 2024-11-20T19:25:22,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741880_1056 (size=12151) 2024-11-20T19:25:22,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/747d36516f1e44c5b0ea9e9d6eb04721 2024-11-20T19:25:22,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/c88f0032495d46168d33c141e3b06ed8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c88f0032495d46168d33c141e3b06ed8 2024-11-20T19:25:22,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:22,502 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c88f0032495d46168d33c141e3b06ed8, entries=200, sequenceid=214, filesize=14.2 K 2024-11-20T19:25:22,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/d6e89f21aa8c46f8924c007e78865034 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/d6e89f21aa8c46f8924c007e78865034 2024-11-20T19:25:22,517 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
as already flushing 2024-11-20T19:25:22,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:22,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/d6e89f21aa8c46f8924c007e78865034, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T19:25:22,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:22,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/747d36516f1e44c5b0ea9e9d6eb04721 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/747d36516f1e44c5b0ea9e9d6eb04721 2024-11-20T19:25:22,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/747d36516f1e44c5b0ea9e9d6eb04721, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T19:25:22,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 6aad06303ed006b601a1faa1a93ab5da in 1186ms, sequenceid=214, compaction requested=true 2024-11-20T19:25:22,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:22,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:22,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:22,543 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:22,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:22,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:22,544 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:22,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:22,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:22,546 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53726 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:22,546 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:22,546 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in 
TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,547 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/233365209b8c435392e6895c7cd2b409, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/aef16aaf336c4e19a0c4f4dee3b816af, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/3d9eca8c364a43e0a40098a4f370c534, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c88f0032495d46168d33c141e3b06ed8] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=52.5 K 2024-11-20T19:25:22,547 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:22,547 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:22,547 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:22,547 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 233365209b8c435392e6895c7cd2b409, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732130717073 2024-11-20T19:25:22,547 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a67b3d5dd29d4b549fbdfc30a9841a2f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/09223248a6014eaf95ffe0aee435cd7d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a2b3e025809b49699ce90c6e4560bdbb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/d6e89f21aa8c46f8924c007e78865034] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=47.8 K 2024-11-20T19:25:22,548 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting aef16aaf336c4e19a0c4f4dee3b816af, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732130717483 2024-11-20T19:25:22,548 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a67b3d5dd29d4b549fbdfc30a9841a2f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732130717073 2024-11-20T19:25:22,549 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 09223248a6014eaf95ffe0aee435cd7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732130717483 2024-11-20T19:25:22,549 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d9eca8c364a43e0a40098a4f370c534, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130718518 2024-11-20T19:25:22,551 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a2b3e025809b49699ce90c6e4560bdbb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130718518 2024-11-20T19:25:22,551 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c88f0032495d46168d33c141e3b06ed8, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130720725 2024-11-20T19:25:22,551 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d6e89f21aa8c46f8924c007e78865034, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130720725 2024-11-20T19:25:22,580 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:22,581 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6a00b4073584455a8912074b2a99392a is 50, key is test_row_0/B:col10/1732130720725/Put/seqid=0 2024-11-20T19:25:22,590 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#43 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:22,591 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ff43d2a624084ca7a9042ee3137f6111 is 50, key is test_row_0/A:col10/1732130720725/Put/seqid=0 2024-11-20T19:25:22,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:22,631 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:22,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:22,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:22,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:22,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:22,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:22,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:22,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741882_1058 (size=12629) 2024-11-20T19:25:22,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741881_1057 (size=12629) 2024-11-20T19:25:22,670 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ff43d2a624084ca7a9042ee3137f6111 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ff43d2a624084ca7a9042ee3137f6111 2024-11-20T19:25:22,670 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6a00b4073584455a8912074b2a99392a as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6a00b4073584455a8912074b2a99392a 2024-11-20T19:25:22,673 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/e70a0146fc6340338fc403ba16940297 is 50, key is test_row_0/A:col10/1732130721429/Put/seqid=0 2024-11-20T19:25:22,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:22,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,689 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into 6a00b4073584455a8912074b2a99392a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:22,689 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:22,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,689 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=12, startTime=1732130722544; duration=0sec 2024-11-20T19:25:22,689 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:22,689 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:22,690 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into ff43d2a624084ca7a9042ee3137f6111(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:22,690 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:22,690 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=12, startTime=1732130722543; duration=0sec 2024-11-20T19:25:22,690 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:22,690 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:22,689 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:22,695 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:22,695 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:22,695 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:22,696 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/86bf254f55d342eb8a6b39b9687152ef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/9c7dfb291861485daca55953fcc2aada, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7fd49b0c099b406b98498afbcbc9f805, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/747d36516f1e44c5b0ea9e9d6eb04721] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=47.8 K 2024-11-20T19:25:22,697 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 86bf254f55d342eb8a6b39b9687152ef, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732130717073 2024-11-20T19:25:22,698 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c7dfb291861485daca55953fcc2aada, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732130717483 2024-11-20T19:25:22,698 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fd49b0c099b406b98498afbcbc9f805, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130718518 2024-11-20T19:25:22,699 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 747d36516f1e44c5b0ea9e9d6eb04721, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130720725 2024-11-20T19:25:22,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130782691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130782694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741883_1059 (size=14541) 2024-11-20T19:25:22,745 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#45 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:22,746 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/1df738cbd3a4458c9e0be393e5f1676a is 50, key is test_row_0/C:col10/1732130720725/Put/seqid=0 2024-11-20T19:25:22,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741884_1060 (size=12629) 2024-11-20T19:25:22,799 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/1df738cbd3a4458c9e0be393e5f1676a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1df738cbd3a4458c9e0be393e5f1676a 2024-11-20T19:25:22,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130782811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,815 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into 1df738cbd3a4458c9e0be393e5f1676a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
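
At this point all three stores (A, B and C) have been compacted from four HFiles each into a single ~12.3 K file per store; the compactions were queued automatically because the preceding flush left "compaction requested=true". A short sketch of requesting a compaction explicitly and polling until it completes (assuming the HBase 2.x Admin API; the polling interval and class name are illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch only: majorCompact() is asynchronous, so poll the reported
    // compaction state until the region servers say nothing is compacting.
    public class CompactTestTable {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.majorCompact(table);
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(1000);
          }
        }
      }
    }
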
2024-11-20T19:25:22,815 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:22,816 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=12, startTime=1732130722544; duration=0sec 2024-11-20T19:25:22,816 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:22,816 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:22,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:22,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130782812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
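
The RegionTooBusyException warnings above are back-pressure: the region's memstores are over the 512.0 K blocking limit, so new mutations are rejected until the flush and compactions catch up. The HBase client normally absorbs these through its own retry policy (and may surface them wrapped in a retries-exhausted exception); a simplified sketch of an explicit retry loop, assuming the 2.x client API, with the row, family, and qualifier taken from the keys in this log and the retry parameters purely illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: back off and retry a put while the region reports it is
    // over its memstore limit, instead of failing the write immediately.
    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5000);
            }
          }
        }
      }
    }
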
2024-11-20T19:25:22,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:22,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:22,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,997 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:22,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:22,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
as already flushing 2024-11-20T19:25:22,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:22,998 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:22,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130783017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130783027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/e70a0146fc6340338fc403ba16940297 2024-11-20T19:25:23,159 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:23,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:23,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
as already flushing 2024-11-20T19:25:23,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:23,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:23,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/61170bc9f09d443ca8ab11fe5875b5d8 is 50, key is test_row_0/B:col10/1732130721429/Put/seqid=0 2024-11-20T19:25:23,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:23,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741885_1061 (size=12151) 2024-11-20T19:25:23,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/61170bc9f09d443ca8ab11fe5875b5d8 2024-11-20T19:25:23,220 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/8bd3e668903543e2911621be4733a7f1 is 50, key is test_row_0/C:col10/1732130721429/Put/seqid=0 2024-11-20T19:25:23,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741886_1062 (size=12151) 2024-11-20T19:25:23,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/8bd3e668903543e2911621be4733a7f1 2024-11-20T19:25:23,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/e70a0146fc6340338fc403ba16940297 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/e70a0146fc6340338fc403ba16940297 2024-11-20T19:25:23,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/e70a0146fc6340338fc403ba16940297, entries=200, sequenceid=240, filesize=14.2 K 2024-11-20T19:25:23,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/61170bc9f09d443ca8ab11fe5875b5d8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/61170bc9f09d443ca8ab11fe5875b5d8 2024-11-20T19:25:23,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:23,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:23,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:23,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:23,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:23,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:23,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130783334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/61170bc9f09d443ca8ab11fe5875b5d8, entries=150, sequenceid=240, filesize=11.9 K 2024-11-20T19:25:23,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130783338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/8bd3e668903543e2911621be4733a7f1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/8bd3e668903543e2911621be4733a7f1 2024-11-20T19:25:23,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/8bd3e668903543e2911621be4733a7f1, entries=150, sequenceid=240, filesize=11.9 K 2024-11-20T19:25:23,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6aad06303ed006b601a1faa1a93ab5da in 744ms, sequenceid=240, compaction requested=false 2024-11-20T19:25:23,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:23,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-20T19:25:23,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:23,475 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:25:23,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:23,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:23,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:23,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:23,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:23,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:23,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/c145c015c4a142a48f31240c860f128f is 50, key is test_row_0/A:col10/1732130722690/Put/seqid=0 2024-11-20T19:25:23,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:23,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741887_1063 (size=12151) 2024-11-20T19:25:23,581 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/c145c015c4a142a48f31240c860f128f 2024-11-20T19:25:23,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a01f64e164cb40eeaa8c74477289dc45 is 50, key is test_row_0/B:col10/1732130722690/Put/seqid=0 2024-11-20T19:25:23,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741888_1064 (size=12151) 2024-11-20T19:25:23,651 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=253 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a01f64e164cb40eeaa8c74477289dc45 2024-11-20T19:25:23,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/fa39bb52015c419bb3bdcfa6b2b1874c is 50, key is test_row_0/C:col10/1732130722690/Put/seqid=0 2024-11-20T19:25:23,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741889_1065 (size=12151) 2024-11-20T19:25:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:23,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:23,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130783982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:23,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:23,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130783983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130784088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130784090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,143 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/fa39bb52015c419bb3bdcfa6b2b1874c 2024-11-20T19:25:24,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/c145c015c4a142a48f31240c860f128f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c145c015c4a142a48f31240c860f128f 2024-11-20T19:25:24,183 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c145c015c4a142a48f31240c860f128f, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T19:25:24,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a01f64e164cb40eeaa8c74477289dc45 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a01f64e164cb40eeaa8c74477289dc45 2024-11-20T19:25:24,197 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a01f64e164cb40eeaa8c74477289dc45, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T19:25:24,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/fa39bb52015c419bb3bdcfa6b2b1874c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/fa39bb52015c419bb3bdcfa6b2b1874c 2024-11-20T19:25:24,221 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/fa39bb52015c419bb3bdcfa6b2b1874c, entries=150, sequenceid=253, filesize=11.9 K 2024-11-20T19:25:24,228 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 6aad06303ed006b601a1faa1a93ab5da in 748ms, sequenceid=253, compaction requested=true 2024-11-20T19:25:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-20T19:25:24,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-20T19:25:24,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-20T19:25:24,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8340 sec 2024-11-20T19:25:24,238 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.8440 sec 2024-11-20T19:25:24,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:25:24,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:24,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:24,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:24,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:24,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:24,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:24,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:24,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/1671b638be714796b96d942f6b2f4b1d is 50, key is test_row_0/A:col10/1732130723979/Put/seqid=0 2024-11-20T19:25:24,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130784345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130784348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741890_1066 (size=14741) 2024-11-20T19:25:24,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/1671b638be714796b96d942f6b2f4b1d 2024-11-20T19:25:24,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/dd02e6000d1c49c2ad38b4f29858c646 is 50, key is test_row_0/B:col10/1732130723979/Put/seqid=0 2024-11-20T19:25:24,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130784451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741891_1067 (size=12301) 2024-11-20T19:25:24,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/dd02e6000d1c49c2ad38b4f29858c646 2024-11-20T19:25:24,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130784455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/0739eb89942041038cf577ac58a58534 is 50, key is test_row_0/C:col10/1732130723979/Put/seqid=0 2024-11-20T19:25:24,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741892_1068 (size=12301) 2024-11-20T19:25:24,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130784655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130784666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/0739eb89942041038cf577ac58a58534 2024-11-20T19:25:24,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/1671b638be714796b96d942f6b2f4b1d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/1671b638be714796b96d942f6b2f4b1d 2024-11-20T19:25:24,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130784963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/1671b638be714796b96d942f6b2f4b1d, entries=200, sequenceid=280, filesize=14.4 K 2024-11-20T19:25:24,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:24,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130784973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:24,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/dd02e6000d1c49c2ad38b4f29858c646 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd02e6000d1c49c2ad38b4f29858c646 2024-11-20T19:25:24,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd02e6000d1c49c2ad38b4f29858c646, entries=150, sequenceid=280, filesize=12.0 K 2024-11-20T19:25:24,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/0739eb89942041038cf577ac58a58534 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0739eb89942041038cf577ac58a58534 2024-11-20T19:25:25,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0739eb89942041038cf577ac58a58534, entries=150, sequenceid=280, filesize=12.0 K 2024-11-20T19:25:25,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 6aad06303ed006b601a1faa1a93ab5da in 707ms, sequenceid=280, compaction requested=true 2024-11-20T19:25:25,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:25,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:25,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:25,004 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:25,004 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:25,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:25,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:25,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:25,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:25,006 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54062 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:25,006 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:25,007 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:25,007 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ff43d2a624084ca7a9042ee3137f6111, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/e70a0146fc6340338fc403ba16940297, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c145c015c4a142a48f31240c860f128f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/1671b638be714796b96d942f6b2f4b1d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=52.8 K 2024-11-20T19:25:25,007 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:25,007 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:25,007 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
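The ExploringCompactionPolicy entries above select all four freshly flushed store files for a minor compaction ("selected 4 files of size 54062 ... after considering 3 permutations with 3 in ratio"). As a rough illustration of what "in ratio" means, the following is a simplified sketch of ratio-based selection; it is not the actual HBase implementation, which also honors hbase.hstore.compaction.min/max, off-peak ratios, and tie-breaking between surviving permutations.

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of ratio-based compaction file selection.
class CompactionSelectionSketch {
    // fileSizes are store file sizes in bytes, oldest first; ratio is ~1.2 by default.
    static List<Long> select(List<Long> fileSizes, double ratio) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + 2; end <= fileSizes.size(); end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                long total = candidate.stream().mapToLong(Long::longValue).sum();
                // A permutation is "in ratio" when no single file dwarfs the rest of the window.
                boolean inRatio = candidate.stream()
                        .allMatch(size -> size <= ratio * (total - size));
                if (inRatio && candidate.size() > best.size()) {
                    best = new ArrayList<>(candidate);
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Four files of roughly the sizes flushed above (~12-14 KB each) all fit one ratio window.
        System.out.println(select(List.of(12_301L, 14_200L, 11_900L, 14_400L), 1.2));
    }
}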
2024-11-20T19:25:25,008 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6a00b4073584455a8912074b2a99392a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/61170bc9f09d443ca8ab11fe5875b5d8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a01f64e164cb40eeaa8c74477289dc45, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd02e6000d1c49c2ad38b4f29858c646] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=48.1 K 2024-11-20T19:25:25,008 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff43d2a624084ca7a9042ee3137f6111, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130720725 2024-11-20T19:25:25,008 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a00b4073584455a8912074b2a99392a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130720725 2024-11-20T19:25:25,008 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e70a0146fc6340338fc403ba16940297, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732130721429 2024-11-20T19:25:25,009 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 61170bc9f09d443ca8ab11fe5875b5d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732130721429 2024-11-20T19:25:25,009 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c145c015c4a142a48f31240c860f128f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732130722651 2024-11-20T19:25:25,009 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a01f64e164cb40eeaa8c74477289dc45, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732130722651 2024-11-20T19:25:25,010 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting dd02e6000d1c49c2ad38b4f29858c646, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732130723940 2024-11-20T19:25:25,011 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1671b638be714796b96d942f6b2f4b1d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732130723940 2024-11-20T19:25:25,036 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#54 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:25,036 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/31fd5bc6508041498fa075869c167fc0 is 50, key is test_row_0/B:col10/1732130723979/Put/seqid=0 2024-11-20T19:25:25,044 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#55 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:25,045 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/d71b072a8a154fc2abf495f3db5360ee is 50, key is test_row_0/A:col10/1732130723979/Put/seqid=0 2024-11-20T19:25:25,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741894_1070 (size=12915) 2024-11-20T19:25:25,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741893_1069 (size=12915) 2024-11-20T19:25:25,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:25,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:25,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/33e9575f2dfe4f248aa8c99be594a855 is 50, key is test_row_0/A:col10/1732130725478/Put/seqid=0 2024-11-20T19:25:25,525 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/d71b072a8a154fc2abf495f3db5360ee as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/d71b072a8a154fc2abf495f3db5360ee 2024-11-20T19:25:25,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-20T19:25:25,529 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-20T19:25:25,532 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:25,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/31fd5bc6508041498fa075869c167fc0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/31fd5bc6508041498fa075869c167fc0 2024-11-20T19:25:25,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-20T19:25:25,541 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:25,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:25,541 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into d71b072a8a154fc2abf495f3db5360ee(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
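The "Committing .../.tmp/A/... as .../A/..." lines show the two-phase pattern both flushes and compactions use here: the new HFile is first written under the region's .tmp directory and then moved into the column family directory, so readers only ever see complete files. A minimal sketch of that commit step, assuming a plain HDFS rename; the real commit in HRegionFileSystem also validates the new file and handles error paths, and the class/method names below are hypothetical.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only, not the HBase implementation.
public class CommitStoreFileSketch {
    public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        // On HDFS a rename is a metadata-only operation, so the finished HFile
        // appears in the store directory atomically rather than half-written.
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }
}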
2024-11-20T19:25:25,542 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:25,542 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=12, startTime=1732130725004; duration=0sec 2024-11-20T19:25:25,542 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:25,542 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:25,542 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:25,544 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:25,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:25,552 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into 31fd5bc6508041498fa075869c167fc0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:25,552 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:25,552 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=12, startTime=1732130725004; duration=0sec 2024-11-20T19:25:25,552 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:25,552 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:25,553 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:25,553 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:25,553 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
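The master-side entries here ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees", FlushTableProcedure pid=20 fanning out a FlushRegionProcedure subprocedure) correspond to an Admin flush request from the test client. A minimal client-side sketch of that call, assuming a reachable cluster with the default configuration on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // The master turns this request into a FlushTableProcedure and schedules
            // one FlushRegionProcedure per region of the table, as logged above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}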
2024-11-20T19:25:25,553 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1df738cbd3a4458c9e0be393e5f1676a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/8bd3e668903543e2911621be4733a7f1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/fa39bb52015c419bb3bdcfa6b2b1874c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0739eb89942041038cf577ac58a58534] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=48.1 K 2024-11-20T19:25:25,554 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1df738cbd3a4458c9e0be393e5f1676a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130720725 2024-11-20T19:25:25,557 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8bd3e668903543e2911621be4733a7f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732130721429 2024-11-20T19:25:25,558 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa39bb52015c419bb3bdcfa6b2b1874c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1732130722651 2024-11-20T19:25:25,559 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0739eb89942041038cf577ac58a58534, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732130723940 2024-11-20T19:25:25,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741895_1071 (size=12297) 2024-11-20T19:25:25,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/33e9575f2dfe4f248aa8c99be594a855 2024-11-20T19:25:25,588 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#57 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:25,589 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/b2d1bbbf672d49de90aa960a5c90a4c8 is 50, key is test_row_0/C:col10/1732130723979/Put/seqid=0 2024-11-20T19:25:25,590 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6498bc87a3e94d2e9d04ed1d249806f9 is 50, key is test_row_0/B:col10/1732130725478/Put/seqid=0 2024-11-20T19:25:25,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130785620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:25,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130785635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741897_1073 (size=9857) 2024-11-20T19:25:25,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6498bc87a3e94d2e9d04ed1d249806f9 2024-11-20T19:25:25,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741896_1072 (size=12915) 2024-11-20T19:25:25,691 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/b2d1bbbf672d49de90aa960a5c90a4c8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b2d1bbbf672d49de90aa960a5c90a4c8 2024-11-20T19:25:25,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1732130785682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,696 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:25,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/95f96acd99764117a2b5405d0abdfdf0 is 50, key is test_row_0/C:col10/1732130725478/Put/seqid=0 2024-11-20T19:25:25,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T19:25:25,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:25,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:25,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:25,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:25,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
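The pid=21 failure above is the region server declining the procedure-driven flush because MemStoreFlusher is already flushing this region ("NOT flushing ... as already flushing"), so FlushRegionCallable reports an IOException back to the master rather than starting a second concurrent flush. A generic sketch of that guard pattern, with illustrative names that are not the HBase classes:

import java.util.concurrent.atomic.AtomicBoolean;

// Generic "refuse if already in progress" guard, sketched for illustration.
class FlushGuardSketch {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    boolean tryFlush(Runnable doFlush) {
        if (!flushing.compareAndSet(false, true)) {
            // Mirrors the "NOT flushing ... as already flushing" refusal above,
            // which is then surfaced to the master as a failed remote procedure.
            return false;
        }
        try {
            doFlush.run();
            return true;
        } finally {
            flushing.set(false);
        }
    }
}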
2024-11-20T19:25:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:25,704 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into b2d1bbbf672d49de90aa960a5c90a4c8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
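The RegionTooBusyException storms throughout this run come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking limit, which is roughly the flush size multiplied by the block multiplier; the client-side Thread-155/Thread-157 entries show RpcRetryingCallerImpl retrying the puts until flushes free memstore space. The sketch below uses the standard property names, but the 128 KB / 4x values are only an assumption chosen to reproduce the 512.0 K limit seen in this log, not a statement of what TestAcidGuarantees actually configures.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the knobs behind the "Over memstore limit=512.0 K" rejections.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // assumed: flush at 128 KB
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier: block at 4x
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getLong("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("writes blocked above ~" + blockingLimit + " bytes per region");
    }
}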
2024-11-20T19:25:25,704 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:25,704 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=12, startTime=1732130725005; duration=0sec 2024-11-20T19:25:25,704 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:25,704 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:25,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1732130785704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,711 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8228 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:25,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130785729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53960 deadline: 1732130785726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,733 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8258 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:25,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:25,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130785752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741898_1074 (size=9857) 2024-11-20T19:25:25,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/95f96acd99764117a2b5405d0abdfdf0 2024-11-20T19:25:25,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/33e9575f2dfe4f248aa8c99be594a855 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/33e9575f2dfe4f248aa8c99be594a855 2024-11-20T19:25:25,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/33e9575f2dfe4f248aa8c99be594a855, entries=150, sequenceid=291, filesize=12.0 K 2024-11-20T19:25:25,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/6498bc87a3e94d2e9d04ed1d249806f9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6498bc87a3e94d2e9d04ed1d249806f9 2024-11-20T19:25:25,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6498bc87a3e94d2e9d04ed1d249806f9, entries=100, sequenceid=291, filesize=9.6 K 2024-11-20T19:25:25,796 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/95f96acd99764117a2b5405d0abdfdf0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/95f96acd99764117a2b5405d0abdfdf0 2024-11-20T19:25:25,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/95f96acd99764117a2b5405d0abdfdf0, entries=100, sequenceid=291, filesize=9.6 K 2024-11-20T19:25:25,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6aad06303ed006b601a1faa1a93ab5da in 322ms, sequenceid=291, compaction requested=false 2024-11-20T19:25:25,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:25,854 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:25,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-20T19:25:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:25,855 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:25,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:25,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:25,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/38c6c104ec8641a98acca319506f371f is 50, key is test_row_0/A:col10/1732130725592/Put/seqid=0 2024-11-20T19:25:25,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741899_1075 (size=12301) 2024-11-20T19:25:25,925 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/38c6c104ec8641a98acca319506f371f 2024-11-20T19:25:25,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:25,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:25,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a20e2591c21543c2a624d0fe9d69b8fa is 50, key is test_row_0/B:col10/1732130725592/Put/seqid=0 2024-11-20T19:25:26,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130785993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130786002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741900_1076 (size=12301) 2024-11-20T19:25:26,041 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a20e2591c21543c2a624d0fe9d69b8fa 2024-11-20T19:25:26,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/cccceabb177b47a5a861ec8fae197555 is 50, key is test_row_0/C:col10/1732130725592/Put/seqid=0 2024-11-20T19:25:26,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130786105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130786116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741901_1077 (size=12301) 2024-11-20T19:25:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:26,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130786311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130786327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,528 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/cccceabb177b47a5a861ec8fae197555 2024-11-20T19:25:26,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/38c6c104ec8641a98acca319506f371f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/38c6c104ec8641a98acca319506f371f 2024-11-20T19:25:26,543 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/38c6c104ec8641a98acca319506f371f, entries=150, sequenceid=319, filesize=12.0 K 2024-11-20T19:25:26,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a20e2591c21543c2a624d0fe9d69b8fa as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a20e2591c21543c2a624d0fe9d69b8fa 2024-11-20T19:25:26,556 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a20e2591c21543c2a624d0fe9d69b8fa, entries=150, sequenceid=319, filesize=12.0 K 2024-11-20T19:25:26,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/cccceabb177b47a5a861ec8fae197555 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/cccceabb177b47a5a861ec8fae197555 2024-11-20T19:25:26,567 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/cccceabb177b47a5a861ec8fae197555, entries=150, sequenceid=319, filesize=12.0 K 2024-11-20T19:25:26,568 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6aad06303ed006b601a1faa1a93ab5da in 713ms, sequenceid=319, compaction requested=true 2024-11-20T19:25:26,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:26,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:26,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-20T19:25:26,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-20T19:25:26,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-20T19:25:26,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0270 sec 2024-11-20T19:25:26,579 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.0450 sec 2024-11-20T19:25:26,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:26,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:26,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:26,636 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ef858e74d9a545578f91c4e196c9696d is 50, key is test_row_0/A:col10/1732130726622/Put/seqid=0 2024-11-20T19:25:26,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-20T19:25:26,649 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-20T19:25:26,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:26,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-20T19:25:26,662 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:26,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:26,663 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:26,664 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:26,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741902_1078 (size=12301) 2024-11-20T19:25:26,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ef858e74d9a545578f91c4e196c9696d 2024-11-20T19:25:26,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a438e011128d485785f4facf067aae91 is 50, key is test_row_0/B:col10/1732130726622/Put/seqid=0 2024-11-20T19:25:26,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130786744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130786751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741903_1079 (size=12301) 2024-11-20T19:25:26,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a438e011128d485785f4facf067aae91 2024-11-20T19:25:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:26,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/a92fe3702ef7462396ba344973c03e08 is 50, key is test_row_0/C:col10/1732130726622/Put/seqid=0 2024-11-20T19:25:26,816 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:26,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:26,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:26,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:26,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741904_1080 (size=12301) 2024-11-20T19:25:26,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130786860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130786862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:26,971 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:26,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:26,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:26,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:26,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:26,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:26,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:26,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130787068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130787079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,126 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:27,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:27,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:27,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/a92fe3702ef7462396ba344973c03e08 2024-11-20T19:25:27,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:27,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/ef858e74d9a545578f91c4e196c9696d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ef858e74d9a545578f91c4e196c9696d 2024-11-20T19:25:27,282 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:27,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:27,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:27,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ef858e74d9a545578f91c4e196c9696d, entries=150, sequenceid=331, filesize=12.0 K 2024-11-20T19:25:27,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/a438e011128d485785f4facf067aae91 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a438e011128d485785f4facf067aae91 2024-11-20T19:25:27,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a438e011128d485785f4facf067aae91, entries=150, sequenceid=331, filesize=12.0 K 2024-11-20T19:25:27,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/a92fe3702ef7462396ba344973c03e08 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/a92fe3702ef7462396ba344973c03e08 2024-11-20T19:25:27,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/a92fe3702ef7462396ba344973c03e08, entries=150, sequenceid=331, filesize=12.0 K 2024-11-20T19:25:27,338 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6aad06303ed006b601a1faa1a93ab5da in 715ms, sequenceid=331, compaction requested=true 2024-11-20T19:25:27,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:27,338 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:27,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:27,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:27,340 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:27,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, 
priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:27,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:27,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:27,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:27,341 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49814 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:27,341 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:27,341 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,341 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/d71b072a8a154fc2abf495f3db5360ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/33e9575f2dfe4f248aa8c99be594a855, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/38c6c104ec8641a98acca319506f371f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ef858e74d9a545578f91c4e196c9696d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=48.6 K 2024-11-20T19:25:27,343 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d71b072a8a154fc2abf495f3db5360ee, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732130723940 2024-11-20T19:25:27,344 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33e9575f2dfe4f248aa8c99be594a855, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130724345 2024-11-20T19:25:27,345 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47374 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:27,345 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:27,345 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 
6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,345 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/31fd5bc6508041498fa075869c167fc0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6498bc87a3e94d2e9d04ed1d249806f9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a20e2591c21543c2a624d0fe9d69b8fa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a438e011128d485785f4facf067aae91] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=46.3 K 2024-11-20T19:25:27,346 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 31fd5bc6508041498fa075869c167fc0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732130723940 2024-11-20T19:25:27,346 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38c6c104ec8641a98acca319506f371f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732130725592 2024-11-20T19:25:27,347 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6498bc87a3e94d2e9d04ed1d249806f9, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130725478 2024-11-20T19:25:27,347 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef858e74d9a545578f91c4e196c9696d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732130725964 2024-11-20T19:25:27,348 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a20e2591c21543c2a624d0fe9d69b8fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732130725592 2024-11-20T19:25:27,348 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a438e011128d485785f4facf067aae91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732130725964 2024-11-20T19:25:27,376 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#66 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:27,377 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/c32dd38b02b94f3eb09a2491c91775f5 is 50, key is test_row_0/B:col10/1732130726622/Put/seqid=0 2024-11-20T19:25:27,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:27,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:27,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:27,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:27,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:27,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:27,390 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#67 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:27,391 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/61b9a5bde3ae4f5baea40a1a04976c2a is 50, key is test_row_0/A:col10/1732130726622/Put/seqid=0 2024-11-20T19:25:27,425 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/cc4ae9151b5e4c63b2ff94baf2cc942b is 50, key is test_row_0/A:col10/1732130727377/Put/seqid=0 2024-11-20T19:25:27,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130787435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:27,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130787435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:27,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:27,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741905_1081 (size=13051) 2024-11-20T19:25:27,474 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/c32dd38b02b94f3eb09a2491c91775f5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c32dd38b02b94f3eb09a2491c91775f5 2024-11-20T19:25:27,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741906_1082 (size=13051) 2024-11-20T19:25:27,487 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into c32dd38b02b94f3eb09a2491c91775f5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:27,487 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:27,487 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=12, startTime=1732130727340; duration=0sec 2024-11-20T19:25:27,487 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:27,487 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:27,487 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:27,488 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/61b9a5bde3ae4f5baea40a1a04976c2a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/61b9a5bde3ae4f5baea40a1a04976c2a 2024-11-20T19:25:27,492 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47374 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:27,493 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:27,493 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,493 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b2d1bbbf672d49de90aa960a5c90a4c8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/95f96acd99764117a2b5405d0abdfdf0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/cccceabb177b47a5a861ec8fae197555, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/a92fe3702ef7462396ba344973c03e08] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=46.3 K 2024-11-20T19:25:27,495 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b2d1bbbf672d49de90aa960a5c90a4c8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732130723940 2024-11-20T19:25:27,496 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 95f96acd99764117a2b5405d0abdfdf0, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130725478 2024-11-20T19:25:27,499 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting cccceabb177b47a5a861ec8fae197555, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732130725592 2024-11-20T19:25:27,501 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a92fe3702ef7462396ba344973c03e08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732130725964 2024-11-20T19:25:27,506 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into 61b9a5bde3ae4f5baea40a1a04976c2a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:27,506 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:27,506 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=12, startTime=1732130727338; duration=0sec 2024-11-20T19:25:27,506 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:27,506 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:27,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741907_1083 (size=14741) 2024-11-20T19:25:27,522 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/cc4ae9151b5e4c63b2ff94baf2cc942b 2024-11-20T19:25:27,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130787541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130787542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,547 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:27,549 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/651b0ebb088646479bb78ccf9a7d4ebb is 50, key is test_row_0/C:col10/1732130726622/Put/seqid=0 2024-11-20T19:25:27,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/060efee4bd654115962aad0215b44ceb is 50, key is test_row_0/B:col10/1732130727377/Put/seqid=0 2024-11-20T19:25:27,599 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:27,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:27,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:27,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:27,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741908_1084 (size=13051) 2024-11-20T19:25:27,637 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/651b0ebb088646479bb78ccf9a7d4ebb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/651b0ebb088646479bb78ccf9a7d4ebb 2024-11-20T19:25:27,655 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into 651b0ebb088646479bb78ccf9a7d4ebb(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:27,655 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:27,655 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=12, startTime=1732130727340; duration=0sec 2024-11-20T19:25:27,655 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:27,655 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:27,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741909_1085 (size=12301) 2024-11-20T19:25:27,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130787746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130787748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,755 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:27,911 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:27,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:27,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:27,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:27,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:27,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:27,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130788050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130788050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/060efee4bd654115962aad0215b44ceb 2024-11-20T19:25:28,064 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:28,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:28,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/60617ec004f44e9381c87efc81e67f04 is 50, key is test_row_0/C:col10/1732130727377/Put/seqid=0 2024-11-20T19:25:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741910_1086 (size=12301) 2024-11-20T19:25:28,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:28,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:28,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:28,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,374 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:28,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:28,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:28,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:28,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:28,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=356 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/60617ec004f44e9381c87efc81e67f04 2024-11-20T19:25:28,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/cc4ae9151b5e4c63b2ff94baf2cc942b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/cc4ae9151b5e4c63b2ff94baf2cc942b 2024-11-20T19:25:28,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130788561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/cc4ae9151b5e4c63b2ff94baf2cc942b, entries=200, sequenceid=356, filesize=14.4 K 2024-11-20T19:25:28,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:28,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130788563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/060efee4bd654115962aad0215b44ceb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/060efee4bd654115962aad0215b44ceb 2024-11-20T19:25:28,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/060efee4bd654115962aad0215b44ceb, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T19:25:28,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/60617ec004f44e9381c87efc81e67f04 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/60617ec004f44e9381c87efc81e67f04 2024-11-20T19:25:28,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/60617ec004f44e9381c87efc81e67f04, entries=150, sequenceid=356, filesize=12.0 K 2024-11-20T19:25:28,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 6aad06303ed006b601a1faa1a93ab5da in 1218ms, sequenceid=356, compaction requested=false 2024-11-20T19:25:28,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:28,684 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:28,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-20T19:25:28,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:28,685 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:28,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:28,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:28,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:28,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:28,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:28,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:28,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/487885383fe74daf902833275007bcbd is 50, key is test_row_0/A:col10/1732130727432/Put/seqid=0 2024-11-20T19:25:28,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741911_1087 (size=12301) 2024-11-20T19:25:28,716 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/487885383fe74daf902833275007bcbd 2024-11-20T19:25:28,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/efa2c6fb84c046e9a28843f1d704aca2 is 50, key is test_row_0/B:col10/1732130727432/Put/seqid=0 2024-11-20T19:25:28,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741912_1088 (size=12301) 2024-11-20T19:25:28,754 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=371 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/efa2c6fb84c046e9a28843f1d704aca2 2024-11-20T19:25:28,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f237f1feb5d34588ad6f9e938589b57c is 50, key is test_row_0/C:col10/1732130727432/Put/seqid=0 2024-11-20T19:25:28,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:28,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741913_1089 (size=12301) 2024-11-20T19:25:29,190 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f237f1feb5d34588ad6f9e938589b57c 2024-11-20T19:25:29,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/487885383fe74daf902833275007bcbd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/487885383fe74daf902833275007bcbd 2024-11-20T19:25:29,201 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/487885383fe74daf902833275007bcbd, entries=150, sequenceid=371, filesize=12.0 K 2024-11-20T19:25:29,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/efa2c6fb84c046e9a28843f1d704aca2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa2c6fb84c046e9a28843f1d704aca2 2024-11-20T19:25:29,216 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa2c6fb84c046e9a28843f1d704aca2, entries=150, sequenceid=371, filesize=12.0 K 2024-11-20T19:25:29,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/f237f1feb5d34588ad6f9e938589b57c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f237f1feb5d34588ad6f9e938589b57c 2024-11-20T19:25:29,224 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f237f1feb5d34588ad6f9e938589b57c, entries=150, sequenceid=371, filesize=12.0 K 2024-11-20T19:25:29,225 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 6aad06303ed006b601a1faa1a93ab5da in 540ms, sequenceid=371, compaction requested=true 2024-11-20T19:25:29,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:29,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:29,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-20T19:25:29,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-20T19:25:29,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-20T19:25:29,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5630 sec 2024-11-20T19:25:29,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.5710 sec 2024-11-20T19:25:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:29,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, 
store=C 2024-11-20T19:25:29,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,608 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/63eba2bb3fec4ed18adf3f139f20226b is 50, key is test_row_0/A:col10/1732130729582/Put/seqid=0 2024-11-20T19:25:29,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741914_1090 (size=14741) 2024-11-20T19:25:29,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/63eba2bb3fec4ed18adf3f139f20226b 2024-11-20T19:25:29,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/83e37b5dfc234602af11333c5d53c17a is 50, key is test_row_0/B:col10/1732130729582/Put/seqid=0 2024-11-20T19:25:29,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741915_1091 (size=12301) 2024-11-20T19:25:29,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130789637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:29,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130789640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:29,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/83e37b5dfc234602af11333c5d53c17a 2024-11-20T19:25:29,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/120fe93aa76d43d9aab3cb67f1dd81b5 is 50, key is test_row_0/C:col10/1732130729582/Put/seqid=0 2024-11-20T19:25:29,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741916_1092 (size=12301) 2024-11-20T19:25:29,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/120fe93aa76d43d9aab3cb67f1dd81b5 2024-11-20T19:25:29,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/63eba2bb3fec4ed18adf3f139f20226b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/63eba2bb3fec4ed18adf3f139f20226b 2024-11-20T19:25:29,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/63eba2bb3fec4ed18adf3f139f20226b, entries=200, sequenceid=383, filesize=14.4 K 2024-11-20T19:25:29,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/83e37b5dfc234602af11333c5d53c17a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83e37b5dfc234602af11333c5d53c17a 2024-11-20T19:25:29,687 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83e37b5dfc234602af11333c5d53c17a, entries=150, sequenceid=383, filesize=12.0 K 2024-11-20T19:25:29,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/120fe93aa76d43d9aab3cb67f1dd81b5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/120fe93aa76d43d9aab3cb67f1dd81b5 2024-11-20T19:25:29,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/120fe93aa76d43d9aab3cb67f1dd81b5, entries=150, sequenceid=383, filesize=12.0 K 2024-11-20T19:25:29,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6aad06303ed006b601a1faa1a93ab5da in 116ms, sequenceid=383, compaction requested=true 2024-11-20T19:25:29,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:29,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:29,703 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:29,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:29,703 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:29,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:29,705 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54834 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:29,705 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:29,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:29,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:29,705 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 
{}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:29,705 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:29,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:29,705 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:29,705 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:29,705 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/61b9a5bde3ae4f5baea40a1a04976c2a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/cc4ae9151b5e4c63b2ff94baf2cc942b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/487885383fe74daf902833275007bcbd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/63eba2bb3fec4ed18adf3f139f20226b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=53.5 K 2024-11-20T19:25:29,705 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c32dd38b02b94f3eb09a2491c91775f5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/060efee4bd654115962aad0215b44ceb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa2c6fb84c046e9a28843f1d704aca2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83e37b5dfc234602af11333c5d53c17a] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=48.8 K 2024-11-20T19:25:29,706 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61b9a5bde3ae4f5baea40a1a04976c2a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732130725964 2024-11-20T19:25:29,706 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c32dd38b02b94f3eb09a2491c91775f5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, 
compression=NONE, seqNum=331, earliestPutTs=1732130725964 2024-11-20T19:25:29,707 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 060efee4bd654115962aad0215b44ceb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732130726741 2024-11-20T19:25:29,707 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc4ae9151b5e4c63b2ff94baf2cc942b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732130726723 2024-11-20T19:25:29,707 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 487885383fe74daf902833275007bcbd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732130727425 2024-11-20T19:25:29,707 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting efa2c6fb84c046e9a28843f1d704aca2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732130727425 2024-11-20T19:25:29,708 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63eba2bb3fec4ed18adf3f139f20226b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732130729582 2024-11-20T19:25:29,708 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 83e37b5dfc234602af11333c5d53c17a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732130729582 2024-11-20T19:25:29,723 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:29,723 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/4b6ac13db7044002a605f08de335c68c is 50, key is test_row_0/B:col10/1732130729582/Put/seqid=0 2024-11-20T19:25:29,725 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#79 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:29,726 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/336a283aa9d04b1fa4c1480893262671 is 50, key is test_row_0/A:col10/1732130729582/Put/seqid=0 2024-11-20T19:25:29,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741918_1094 (size=13187) 2024-11-20T19:25:29,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:29,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:29,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:29,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:29,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:29,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:29,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/2998c43586ec493d8fcc590335008b86 is 50, key is test_row_0/A:col10/1732130729744/Put/seqid=0 2024-11-20T19:25:29,760 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/336a283aa9d04b1fa4c1480893262671 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/336a283aa9d04b1fa4c1480893262671 2024-11-20T19:25:29,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741917_1093 (size=13187) 2024-11-20T19:25:29,771 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into 336a283aa9d04b1fa4c1480893262671(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
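The two compaction records above show ExploringCompactionPolicy selecting all four eligible HFiles per store (~48.8-53.5 K total) and CompactSplit rewriting them into a single ~12.9 K file per store. For reference only, a client can request and wait for the same kind of compaction through the public Admin API; the sketch below is illustrative, not what this test does (the compactions in this log are queued internally by MemStoreFlusher/CompactSplit), and it assumes an already-open Connection named conn plus the table name used by this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;

    // Hypothetical helper (illustration only): request a compaction of the
    // test table and poll until the server reports no compaction in progress.
    // Table name and polling interval are assumptions.
    public final class CompactAndWait {
      static void compactAndWait(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = conn.getAdmin()) {
          admin.compact(tn); // asynchronous request for a (minor) compaction
          while (admin.getCompactionState(tn) != CompactionState.NONE) {
            Thread.sleep(200); // poll until the compaction queues drain
          }
        }
      }
    }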
2024-11-20T19:25:29,771 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:29,771 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=12, startTime=1732130729703; duration=0sec 2024-11-20T19:25:29,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:29,771 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:29,771 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:29,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130789766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:29,772 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:29,775 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:29,775 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/4b6ac13db7044002a605f08de335c68c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4b6ac13db7044002a605f08de335c68c 2024-11-20T19:25:29,775 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:29,776 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:29,776 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/651b0ebb088646479bb78ccf9a7d4ebb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/60617ec004f44e9381c87efc81e67f04, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f237f1feb5d34588ad6f9e938589b57c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/120fe93aa76d43d9aab3cb67f1dd81b5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=48.8 K 2024-11-20T19:25:29,777 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 651b0ebb088646479bb78ccf9a7d4ebb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1732130725964 2024-11-20T19:25:29,778 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60617ec004f44e9381c87efc81e67f04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=356, earliestPutTs=1732130726741 2024-11-20T19:25:29,779 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f237f1feb5d34588ad6f9e938589b57c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732130727425 2024-11-20T19:25:29,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:29,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130789771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:29,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741919_1095 (size=14741) 2024-11-20T19:25:29,782 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 120fe93aa76d43d9aab3cb67f1dd81b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732130729582 2024-11-20T19:25:29,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/2998c43586ec493d8fcc590335008b86 2024-11-20T19:25:29,784 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into 4b6ac13db7044002a605f08de335c68c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
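The repeated WARN/DEBUG pairs in this stretch of the log show HRegion.checkResources rejecting Mutate calls with RegionTooBusyException once the region's memstore exceeds its 512 K blocking limit, and CallRunner returning that exception to the client at 172.17.0.2. The standard HBase client retries such calls internally; the sketch below only illustrates, under stated assumptions, how a caller could back off explicitly. The table name, row, family, and retry/backoff numbers are assumptions taken from this log, and because the exception may surface wrapped depending on client retry settings, the check also inspects the cause.

    import java.io.IOException;

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical client-side sketch: retry a single put with linear backoff
    // when the region reports it is over its memstore limit, which is the
    // failure mode logged above. Names and limits are illustrative only.
    public final class PutWithBackoff {
      static void putWithBackoff(Connection conn) throws Exception {
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              return;
            } catch (IOException e) {
              boolean tooBusy = e instanceof RegionTooBusyException
                  || e.getCause() instanceof RegionTooBusyException;
              if (!tooBusy || attempt >= 5) {
                throw e; // not a busy-region error, or out of attempts
              }
              Thread.sleep(100L * attempt); // back off before retrying
            }
          }
        }
      }
    }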
2024-11-20T19:25:29,784 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:29,784 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=12, startTime=1732130729703; duration=0sec 2024-11-20T19:25:29,785 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:29,785 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:29,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/dd770c9cfa284a8ebebe13b3b66b6aea is 50, key is test_row_0/B:col10/1732130729744/Put/seqid=0 2024-11-20T19:25:29,809 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:29,809 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/7678e5a91dce4be183e4bfc0ec7a26a2 is 50, key is test_row_0/C:col10/1732130729582/Put/seqid=0 2024-11-20T19:25:29,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741920_1096 (size=12301) 2024-11-20T19:25:29,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/dd770c9cfa284a8ebebe13b3b66b6aea 2024-11-20T19:25:29,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741921_1097 (size=13187) 2024-11-20T19:25:29,851 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/7678e5a91dce4be183e4bfc0ec7a26a2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7678e5a91dce4be183e4bfc0ec7a26a2 2024-11-20T19:25:29,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/1149352d72bc46448592bf360be4d32e is 50, key is test_row_0/C:col10/1732130729744/Put/seqid=0 2024-11-20T19:25:29,860 INFO 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into 7678e5a91dce4be183e4bfc0ec7a26a2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:29,860 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:29,860 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=12, startTime=1732130729705; duration=0sec 2024-11-20T19:25:29,860 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:29,860 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:29,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:29,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130789873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:29,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741922_1098 (size=12301) 2024-11-20T19:25:29,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:29,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130789881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130790076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130790084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/1149352d72bc46448592bf360be4d32e 2024-11-20T19:25:30,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/2998c43586ec493d8fcc590335008b86 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/2998c43586ec493d8fcc590335008b86 2024-11-20T19:25:30,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/2998c43586ec493d8fcc590335008b86, entries=200, sequenceid=409, filesize=14.4 K 2024-11-20T19:25:30,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/dd770c9cfa284a8ebebe13b3b66b6aea as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd770c9cfa284a8ebebe13b3b66b6aea 2024-11-20T19:25:30,308 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd770c9cfa284a8ebebe13b3b66b6aea, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T19:25:30,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/1149352d72bc46448592bf360be4d32e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1149352d72bc46448592bf360be4d32e 2024-11-20T19:25:30,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1149352d72bc46448592bf360be4d32e, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T19:25:30,320 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6aad06303ed006b601a1faa1a93ab5da in 575ms, sequenceid=409, compaction requested=false 2024-11-20T19:25:30,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:30,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:30,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:30,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:30,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:30,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:30,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:30,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:30,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/733c0fd3cc1344b1aa801ca195ad7ec6 is 50, key is test_row_0/A:col10/1732130730382/Put/seqid=0 2024-11-20T19:25:30,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741923_1099 (size=12297) 2024-11-20T19:25:30,402 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/733c0fd3cc1344b1aa801ca195ad7ec6 2024-11-20T19:25:30,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/c59179a637624909b3191dba8bc2553a is 50, key is test_row_0/B:col10/1732130730382/Put/seqid=0 2024-11-20T19:25:30,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130790441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741924_1100 (size=9857) 2024-11-20T19:25:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130790442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/c59179a637624909b3191dba8bc2553a 2024-11-20T19:25:30,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/36ab8712d9b340bbae6d9d94256a1c69 is 50, key is test_row_0/C:col10/1732130730382/Put/seqid=0 2024-11-20T19:25:30,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741925_1101 (size=9857) 2024-11-20T19:25:30,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/36ab8712d9b340bbae6d9d94256a1c69 2024-11-20T19:25:30,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/733c0fd3cc1344b1aa801ca195ad7ec6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/733c0fd3cc1344b1aa801ca195ad7ec6 2024-11-20T19:25:30,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/733c0fd3cc1344b1aa801ca195ad7ec6, entries=150, sequenceid=423, filesize=12.0 K 2024-11-20T19:25:30,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/c59179a637624909b3191dba8bc2553a as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c59179a637624909b3191dba8bc2553a 2024-11-20T19:25:30,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c59179a637624909b3191dba8bc2553a, entries=100, sequenceid=423, filesize=9.6 K 2024-11-20T19:25:30,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/36ab8712d9b340bbae6d9d94256a1c69 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/36ab8712d9b340bbae6d9d94256a1c69 2024-11-20T19:25:30,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/36ab8712d9b340bbae6d9d94256a1c69, entries=100, sequenceid=423, filesize=9.6 K 2024-11-20T19:25:30,512 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6aad06303ed006b601a1faa1a93ab5da in 129ms, sequenceid=423, compaction requested=true 2024-11-20T19:25:30,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:30,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:30,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:30,513 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:30,513 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:30,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:30,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:30,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6aad06303ed006b601a1faa1a93ab5da:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:30,513 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:30,514 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 40225 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:30,514 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/A is initiating minor compaction (all files) 2024-11-20T19:25:30,514 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/A in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:30,515 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/336a283aa9d04b1fa4c1480893262671, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/2998c43586ec493d8fcc590335008b86, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/733c0fd3cc1344b1aa801ca195ad7ec6] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=39.3 K 2024-11-20T19:25:30,515 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35345 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:30,515 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/B is initiating minor compaction (all files) 2024-11-20T19:25:30,515 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/B in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
2024-11-20T19:25:30,515 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4b6ac13db7044002a605f08de335c68c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd770c9cfa284a8ebebe13b3b66b6aea, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c59179a637624909b3191dba8bc2553a] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=34.5 K 2024-11-20T19:25:30,515 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 336a283aa9d04b1fa4c1480893262671, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732130729582 2024-11-20T19:25:30,516 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b6ac13db7044002a605f08de335c68c, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732130729582 2024-11-20T19:25:30,516 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2998c43586ec493d8fcc590335008b86, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130729635 2024-11-20T19:25:30,516 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting dd770c9cfa284a8ebebe13b3b66b6aea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130729635 2024-11-20T19:25:30,517 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 733c0fd3cc1344b1aa801ca195ad7ec6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732130729762 2024-11-20T19:25:30,517 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c59179a637624909b3191dba8bc2553a, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732130729762 2024-11-20T19:25:30,528 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#B#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:30,529 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/8aecf8fcba61427198df99775a77df72 is 50, key is test_row_0/B:col10/1732130730382/Put/seqid=0 2024-11-20T19:25:30,537 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#A#compaction#88 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:30,537 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/938aec4005fb437c97ab85d7dcfc46b8 is 50, key is test_row_0/A:col10/1732130730382/Put/seqid=0 2024-11-20T19:25:30,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741926_1102 (size=13289) 2024-11-20T19:25:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:30,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:30,557 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/8aecf8fcba61427198df99775a77df72 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8aecf8fcba61427198df99775a77df72 2024-11-20T19:25:30,565 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/B of 6aad06303ed006b601a1faa1a93ab5da into 8aecf8fcba61427198df99775a77df72(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:30,565 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:30,565 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/B, priority=13, startTime=1732130730513; duration=0sec 2024-11-20T19:25:30,565 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:30,565 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:B 2024-11-20T19:25:30,565 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:30,567 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35345 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:30,567 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 6aad06303ed006b601a1faa1a93ab5da/C is initiating minor compaction (all files) 2024-11-20T19:25:30,568 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6aad06303ed006b601a1faa1a93ab5da/C in TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:30,568 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7678e5a91dce4be183e4bfc0ec7a26a2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1149352d72bc46448592bf360be4d32e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/36ab8712d9b340bbae6d9d94256a1c69] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp, totalSize=34.5 K 2024-11-20T19:25:30,568 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7678e5a91dce4be183e4bfc0ec7a26a2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1732130729582 2024-11-20T19:25:30,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/bf14b3e316b6450cb9e45f6fc59d52df is 50, key is test_row_0/A:col10/1732130730432/Put/seqid=0 2024-11-20T19:25:30,569 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1149352d72bc46448592bf360be4d32e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130729635 2024-11-20T19:25:30,569 DEBUG 
[RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 36ab8712d9b340bbae6d9d94256a1c69, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1732130729762 2024-11-20T19:25:30,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741927_1103 (size=13289) 2024-11-20T19:25:30,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130790578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130790578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,584 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6aad06303ed006b601a1faa1a93ab5da#C#compaction#90 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:30,584 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/ca4b9902550642c08ad3fd73df5f939f is 50, key is test_row_0/C:col10/1732130730382/Put/seqid=0 2024-11-20T19:25:30,593 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/938aec4005fb437c97ab85d7dcfc46b8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/938aec4005fb437c97ab85d7dcfc46b8 2024-11-20T19:25:30,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741928_1104 (size=14741) 2024-11-20T19:25:30,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/bf14b3e316b6450cb9e45f6fc59d52df 2024-11-20T19:25:30,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741929_1105 (size=13289) 2024-11-20T19:25:30,601 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/A of 6aad06303ed006b601a1faa1a93ab5da into 938aec4005fb437c97ab85d7dcfc46b8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:30,601 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:30,602 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/A, priority=13, startTime=1732130730512; duration=0sec 2024-11-20T19:25:30,602 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:30,602 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:A 2024-11-20T19:25:30,613 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/ca4b9902550642c08ad3fd73df5f939f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/ca4b9902550642c08ad3fd73df5f939f 2024-11-20T19:25:30,622 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6aad06303ed006b601a1faa1a93ab5da/C of 6aad06303ed006b601a1faa1a93ab5da into ca4b9902550642c08ad3fd73df5f939f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:30,622 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:30,622 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da., storeName=6aad06303ed006b601a1faa1a93ab5da/C, priority=13, startTime=1732130730513; duration=0sec 2024-11-20T19:25:30,623 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:30,623 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6aad06303ed006b601a1faa1a93ab5da:C 2024-11-20T19:25:30,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/e3bda1449f334f3296645ffb4623c00f is 50, key is test_row_0/B:col10/1732130730432/Put/seqid=0 2024-11-20T19:25:30,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741930_1106 (size=12301) 2024-11-20T19:25:30,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130790682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130790683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-20T19:25:30,770 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-20T19:25:30,772 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:30,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-20T19:25:30,774 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:30,775 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:30,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:30,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:30,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:30,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130790885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:30,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130790887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:30,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:30,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:30,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:30,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:30,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:30,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:30,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/e3bda1449f334f3296645ffb4623c00f 2024-11-20T19:25:31,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/48ed8de062af4dbba874d437e0ddc08e is 50, key is test_row_0/C:col10/1732130730432/Put/seqid=0 2024-11-20T19:25:31,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741931_1107 (size=12301) 2024-11-20T19:25:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:31,083 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:31,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:31,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:31,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:31,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:31,084 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,120 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:50476 2024-11-20T19:25:31,120 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,121 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:50476 2024-11-20T19:25:31,121 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,122 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:50476 2024-11-20T19:25:31,122 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,123 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:50476 2024-11-20T19:25:31,123 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:31,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53940 deadline: 1732130791189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:31,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:31,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53948 deadline: 1732130791190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:31,235 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:31,236 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:31,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:31,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:31,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. as already flushing 2024-11-20T19:25:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:31,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:31,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/48ed8de062af4dbba874d437e0ddc08e 2024-11-20T19:25:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/bf14b3e316b6450cb9e45f6fc59d52df as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bf14b3e316b6450cb9e45f6fc59d52df 2024-11-20T19:25:31,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bf14b3e316b6450cb9e45f6fc59d52df, entries=200, sequenceid=449, filesize=14.4 K 2024-11-20T19:25:31,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/e3bda1449f334f3296645ffb4623c00f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/e3bda1449f334f3296645ffb4623c00f 2024-11-20T19:25:31,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/e3bda1449f334f3296645ffb4623c00f, entries=150, 
sequenceid=449, filesize=12.0 K 2024-11-20T19:25:31,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/48ed8de062af4dbba874d437e0ddc08e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/48ed8de062af4dbba874d437e0ddc08e 2024-11-20T19:25:31,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/48ed8de062af4dbba874d437e0ddc08e, entries=150, sequenceid=449, filesize=12.0 K 2024-11-20T19:25:31,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 6aad06303ed006b601a1faa1a93ab5da in 946ms, sequenceid=449, compaction requested=false 2024-11-20T19:25:31,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:31,543 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:31,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-20T19:25:31,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
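Note on the RegionTooBusyException entries above: the Mutate RPCs are being rejected because the region's memstore is over its 512.0 K blocking limit while the flush is still in progress. The sketch below shows, under stated assumptions, how a caller could back off and retry such a rejected put. The table name, row key, column family A, and qualifier col10 are taken from the log; the value, attempt count, and backoff schedule are illustrative assumptions, and the stock HBase client already retries internally, so an explicit loop like this is for demonstration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the log; the value is an illustrative placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                            // assumed initial pause between attempts
      for (int attempt = 1; attempt <= 5; attempt++) { // assumed attempt budget
        try {
          table.put(put);                              // rejected with RegionTooBusyException while over the memstore limit
          break;
        } catch (RegionTooBusyException e) {
          // Region is still above the blocking memstore size; wait, then retry.
          // Depending on client settings the exception may also arrive wrapped by
          // the client's own retry layer rather than directly like this.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                              // simple exponential backoff
        }
      }
    }
  }
}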
2024-11-20T19:25:31,544 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:31,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:31,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:31,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:31,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:31,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:31,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:31,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/7df70146346d452fa2cec89f5862c3af is 50, key is test_row_0/A:col10/1732130730560/Put/seqid=0 2024-11-20T19:25:31,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741932_1108 (size=12301) 2024-11-20T19:25:31,573 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:25:31,694 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
as already flushing 2024-11-20T19:25:31,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:31,695 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:50476 2024-11-20T19:25:31,695 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,697 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:50476 2024-11-20T19:25:31,698 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:31,954 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/7df70146346d452fa2cec89f5862c3af 2024-11-20T19:25:31,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/83ee91fc0e204eb9ac184410622a004e is 50, key is test_row_0/B:col10/1732130730560/Put/seqid=0 2024-11-20T19:25:31,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741933_1109 (size=12301) 2024-11-20T19:25:32,366 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/83ee91fc0e204eb9ac184410622a004e 2024-11-20T19:25:32,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/4e39f6590ad44320b646ab2bc21f3ded is 50, key is test_row_0/C:col10/1732130730560/Put/seqid=0 2024-11-20T19:25:32,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741934_1110 (size=12301) 2024-11-20T19:25:32,782 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/4e39f6590ad44320b646ab2bc21f3ded 2024-11-20T19:25:32,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/7df70146346d452fa2cec89f5862c3af as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/7df70146346d452fa2cec89f5862c3af 2024-11-20T19:25:32,814 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/7df70146346d452fa2cec89f5862c3af, entries=150, sequenceid=463, filesize=12.0 K 2024-11-20T19:25:32,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/83ee91fc0e204eb9ac184410622a004e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83ee91fc0e204eb9ac184410622a004e 2024-11-20T19:25:32,819 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83ee91fc0e204eb9ac184410622a004e, entries=150, sequenceid=463, filesize=12.0 K 2024-11-20T19:25:32,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/4e39f6590ad44320b646ab2bc21f3ded as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/4e39f6590ad44320b646ab2bc21f3ded 2024-11-20T19:25:32,825 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/4e39f6590ad44320b646ab2bc21f3ded, entries=150, sequenceid=463, filesize=12.0 K 2024-11-20T19:25:32,825 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=13.42 KB/13740 for 6aad06303ed006b601a1faa1a93ab5da in 1281ms, sequenceid=463, compaction requested=true 2024-11-20T19:25:32,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:32,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
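The flush that just finished was driven from the master side by FlushTableProcedure pid=24 and its per-region FlushRegionProcedure pid=25. A minimal sketch of requesting the same table flush through the client Admin API is below, assuming the cluster configuration is available on the classpath; it only issues the request and does not add any waiting or verification logic.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in this log the request
      // is executed as a FlushTableProcedure with per-region subprocedures.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}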
2024-11-20T19:25:32,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-20T19:25:32,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-20T19:25:32,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-20T19:25:32,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0520 sec 2024-11-20T19:25:32,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.0560 sec 2024-11-20T19:25:32,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-20T19:25:32,883 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-20T19:25:35,748 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:50476 2024-11-20T19:25:35,748 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:35,749 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:50476 2024-11-20T19:25:35,749 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:35,803 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:50476 2024-11-20T19:25:35,803 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:35,803 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-20T19:25:35,803 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 138 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 139 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2822 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2854 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1320 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3958 rows 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1303 2024-11-20T19:25:35,804 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3909 rows 2024-11-20T19:25:35,804 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:25:35,804 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:50476 2024-11-20T19:25:35,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:25:35,810 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:25:35,814 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:25:35,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:35,820 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130735820"}]},"ts":"1732130735820"} 2024-11-20T19:25:35,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:35,821 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:25:35,827 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:25:35,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:25:35,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, UNASSIGN}] 2024-11-20T19:25:35,833 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, UNASSIGN 2024-11-20T19:25:35,834 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=6aad06303ed006b601a1faa1a93ab5da, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:35,835 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:25:35,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; CloseRegionProcedure 6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:35,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:35,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:35,993 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(124): Close 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:35,993 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:25:35,994 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1681): Closing 6aad06303ed006b601a1faa1a93ab5da, disabling compactions & flushes 2024-11-20T19:25:35,994 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:35,994 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:35,994 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. after waiting 0 ms 2024-11-20T19:25:35,994 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 
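The disable sequence above (DisableTableProcedure pid=26, CloseTableRegionsProcedure pid=27, TransitRegionStateProcedure pid=28, CloseRegionProcedure pid=29) is started by a single Admin call from the test client. A minimal sketch of issuing and checking that call is below; the table name comes from the log, the rest is assumed boilerplate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        // Kicks off the DisableTableProcedure chain seen in the log: the table is
        // marked DISABLING in hbase:meta, its regions are unassigned, then DISABLED.
        admin.disableTable(table);
      }
      System.out.println("disabled = " + admin.isTableDisabled(table));
    }
  }
}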
2024-11-20T19:25:35,994 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(2837): Flushing 6aad06303ed006b601a1faa1a93ab5da 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:25:35,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=A 2024-11-20T19:25:35,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:35,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=B 2024-11-20T19:25:35,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:35,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6aad06303ed006b601a1faa1a93ab5da, store=C 2024-11-20T19:25:35,995 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:36,000 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/8f8d70de05da4e778be5950b1c7cd433 is 50, key is test_row_0/A:col10/1732130735802/Put/seqid=0 2024-11-20T19:25:36,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741935_1111 (size=12301) 2024-11-20T19:25:36,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:36,406 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/8f8d70de05da4e778be5950b1c7cd433 2024-11-20T19:25:36,418 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/2928e7f704f444ea957c81f58a249e05 is 50, key is test_row_0/B:col10/1732130735802/Put/seqid=0 2024-11-20T19:25:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:36,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741936_1112 (size=12301) 2024-11-20T19:25:36,429 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 
{event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/2928e7f704f444ea957c81f58a249e05 2024-11-20T19:25:36,440 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/011162a4ed524b6e83f692f42397d553 is 50, key is test_row_0/C:col10/1732130735802/Put/seqid=0 2024-11-20T19:25:36,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741937_1113 (size=12301) 2024-11-20T19:25:36,851 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/011162a4ed524b6e83f692f42397d553 2024-11-20T19:25:36,864 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/A/8f8d70de05da4e778be5950b1c7cd433 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/8f8d70de05da4e778be5950b1c7cd433 2024-11-20T19:25:36,868 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/8f8d70de05da4e778be5950b1c7cd433, entries=150, sequenceid=471, filesize=12.0 K 2024-11-20T19:25:36,869 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/B/2928e7f704f444ea957c81f58a249e05 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/2928e7f704f444ea957c81f58a249e05 2024-11-20T19:25:36,874 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/2928e7f704f444ea957c81f58a249e05, entries=150, sequenceid=471, filesize=12.0 K 2024-11-20T19:25:36,875 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/.tmp/C/011162a4ed524b6e83f692f42397d553 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/011162a4ed524b6e83f692f42397d553 2024-11-20T19:25:36,879 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/011162a4ed524b6e83f692f42397d553, entries=150, sequenceid=471, filesize=12.0 K 2024-11-20T19:25:36,880 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 6aad06303ed006b601a1faa1a93ab5da in 886ms, sequenceid=471, compaction requested=true 2024-11-20T19:25:36,881 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/f20a57254d4e438a9cee5b83a440e03e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/6db46912df1d4cef9a90fa0a5e5ff18e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/da89db61df6b4119bbd0cdc5dc435fe6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b6d5ee6f291e421280dc829aef7c4228, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b66736ddd76c4f9b9e332d30e66b7514, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/01399b0fba2f47c49755b1227d75e6ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bea8a37abb8a4673a7cb85dbc4e35aa1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/233365209b8c435392e6895c7cd2b409, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/67affd162c76432f9083a95564243ae1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/aef16aaf336c4e19a0c4f4dee3b816af, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/3d9eca8c364a43e0a40098a4f370c534, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c88f0032495d46168d33c141e3b06ed8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ff43d2a624084ca7a9042ee3137f6111, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/e70a0146fc6340338fc403ba16940297, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c145c015c4a142a48f31240c860f128f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/1671b638be714796b96d942f6b2f4b1d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/d71b072a8a154fc2abf495f3db5360ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/33e9575f2dfe4f248aa8c99be594a855, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/38c6c104ec8641a98acca319506f371f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/61b9a5bde3ae4f5baea40a1a04976c2a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ef858e74d9a545578f91c4e196c9696d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/cc4ae9151b5e4c63b2ff94baf2cc942b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/487885383fe74daf902833275007bcbd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/63eba2bb3fec4ed18adf3f139f20226b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/336a283aa9d04b1fa4c1480893262671, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/2998c43586ec493d8fcc590335008b86, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/733c0fd3cc1344b1aa801ca195ad7ec6] to archive 2024-11-20T19:25:36,884 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
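As the store closes, the compacted store files listed above are moved out of the region's data directory into the cluster archive directory, keeping the same table/region/family/file layout. The sketch below only illustrates that path mapping for one file name copied from the log; it is not HBase's HFileArchiver implementation.

import org.apache.hadoop.fs.Path;

public class ArchiveLayoutSketch {
  public static void main(String[] args) {
    // Store file path copied from the log above.
    Path storeFile = new Path("hdfs://localhost:34097/user/jenkins/test-data/"
        + "aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/"
        + "6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1");
    // The archived copy lives under <hbase root>/archive with the same
    // data/<namespace>/<table>/<region>/<family>/<file> suffix, as in the log entries.
    String archived = storeFile.toString()
        .replaceFirst("/data/default/", "/archive/data/default/");
    System.out.println(archived);
  }
}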
2024-11-20T19:25:36,890 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/22180d8bee424eae9c2a3c769bb45bb1 2024-11-20T19:25:36,892 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/f20a57254d4e438a9cee5b83a440e03e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/f20a57254d4e438a9cee5b83a440e03e 2024-11-20T19:25:36,894 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/6db46912df1d4cef9a90fa0a5e5ff18e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/6db46912df1d4cef9a90fa0a5e5ff18e 2024-11-20T19:25:36,895 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/da89db61df6b4119bbd0cdc5dc435fe6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/da89db61df6b4119bbd0cdc5dc435fe6 2024-11-20T19:25:36,897 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b6d5ee6f291e421280dc829aef7c4228 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b6d5ee6f291e421280dc829aef7c4228 2024-11-20T19:25:36,899 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b66736ddd76c4f9b9e332d30e66b7514 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/b66736ddd76c4f9b9e332d30e66b7514 2024-11-20T19:25:36,901 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/01399b0fba2f47c49755b1227d75e6ee to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/01399b0fba2f47c49755b1227d75e6ee 2024-11-20T19:25:36,902 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bea8a37abb8a4673a7cb85dbc4e35aa1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bea8a37abb8a4673a7cb85dbc4e35aa1 2024-11-20T19:25:36,903 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ec5cbffc4cfe4970bb2fcae30d3ed8c8 2024-11-20T19:25:36,905 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/233365209b8c435392e6895c7cd2b409 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/233365209b8c435392e6895c7cd2b409 2024-11-20T19:25:36,906 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/67affd162c76432f9083a95564243ae1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/67affd162c76432f9083a95564243ae1 2024-11-20T19:25:36,907 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/aef16aaf336c4e19a0c4f4dee3b816af to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/aef16aaf336c4e19a0c4f4dee3b816af 2024-11-20T19:25:36,908 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/3d9eca8c364a43e0a40098a4f370c534 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/3d9eca8c364a43e0a40098a4f370c534 2024-11-20T19:25:36,909 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c88f0032495d46168d33c141e3b06ed8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c88f0032495d46168d33c141e3b06ed8 2024-11-20T19:25:36,910 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ff43d2a624084ca7a9042ee3137f6111 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ff43d2a624084ca7a9042ee3137f6111 2024-11-20T19:25:36,912 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/e70a0146fc6340338fc403ba16940297 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/e70a0146fc6340338fc403ba16940297 2024-11-20T19:25:36,913 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c145c015c4a142a48f31240c860f128f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/c145c015c4a142a48f31240c860f128f 2024-11-20T19:25:36,914 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/1671b638be714796b96d942f6b2f4b1d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/1671b638be714796b96d942f6b2f4b1d 2024-11-20T19:25:36,915 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/d71b072a8a154fc2abf495f3db5360ee to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/d71b072a8a154fc2abf495f3db5360ee 2024-11-20T19:25:36,916 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/33e9575f2dfe4f248aa8c99be594a855 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/33e9575f2dfe4f248aa8c99be594a855 2024-11-20T19:25:36,917 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/38c6c104ec8641a98acca319506f371f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/38c6c104ec8641a98acca319506f371f 2024-11-20T19:25:36,918 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/61b9a5bde3ae4f5baea40a1a04976c2a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/61b9a5bde3ae4f5baea40a1a04976c2a 2024-11-20T19:25:36,919 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ef858e74d9a545578f91c4e196c9696d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/ef858e74d9a545578f91c4e196c9696d 2024-11-20T19:25:36,920 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/cc4ae9151b5e4c63b2ff94baf2cc942b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/cc4ae9151b5e4c63b2ff94baf2cc942b 2024-11-20T19:25:36,921 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/487885383fe74daf902833275007bcbd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/487885383fe74daf902833275007bcbd 2024-11-20T19:25:36,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/63eba2bb3fec4ed18adf3f139f20226b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/63eba2bb3fec4ed18adf3f139f20226b 2024-11-20T19:25:36,923 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/336a283aa9d04b1fa4c1480893262671 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/336a283aa9d04b1fa4c1480893262671 2024-11-20T19:25:36,924 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/2998c43586ec493d8fcc590335008b86 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/2998c43586ec493d8fcc590335008b86 2024-11-20T19:25:36,925 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/733c0fd3cc1344b1aa801ca195ad7ec6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/733c0fd3cc1344b1aa801ca195ad7ec6 2024-11-20T19:25:36,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:36,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/0f3823c0d19d452cbb54e1f29b87a91d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a24a33034ed8478f8303247a96f61960, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebcc4aa49e7e406cadf44145deadf29a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8e44786668b84f5d8255591fc95003b8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebbeda9c43a2459eae601f5f27731bd5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4377eb601e854ad6ba02b219e0b1bc88, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa284dc78094e0b845de0ac8ede097b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6e65cbebfdf04bc28448d53afe1a3fb5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/90c3fc467a6f42ce90d10a765ba689ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a67b3d5dd29d4b549fbdfc30a9841a2f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a0754508617141dda2fbf8d19eea3471, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/09223248a6014eaf95ffe0aee435cd7d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a2b3e025809b49699ce90c6e4560bdbb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6a00b4073584455a8912074b2a99392a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/d6e89f21aa8c46f8924c007e78865034, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/61170bc9f09d443ca8ab11fe5875b5d8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a01f64e164cb40eeaa8c74477289dc45, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/31fd5bc6508041498fa075869c167fc0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd02e6000d1c49c2ad38b4f29858c646, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6498bc87a3e94d2e9d04ed1d249806f9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a20e2591c21543c2a624d0fe9d69b8fa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c32dd38b02b94f3eb09a2491c91775f5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a438e011128d485785f4facf067aae91, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/060efee4bd654115962aad0215b44ceb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa2c6fb84c046e9a28843f1d704aca2, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4b6ac13db7044002a605f08de335c68c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83e37b5dfc234602af11333c5d53c17a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd770c9cfa284a8ebebe13b3b66b6aea, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c59179a637624909b3191dba8bc2553a] to archive 2024-11-20T19:25:36,937 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:25:36,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/0f3823c0d19d452cbb54e1f29b87a91d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/0f3823c0d19d452cbb54e1f29b87a91d 2024-11-20T19:25:36,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a24a33034ed8478f8303247a96f61960 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a24a33034ed8478f8303247a96f61960 2024-11-20T19:25:36,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebcc4aa49e7e406cadf44145deadf29a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebcc4aa49e7e406cadf44145deadf29a 2024-11-20T19:25:36,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8e44786668b84f5d8255591fc95003b8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8e44786668b84f5d8255591fc95003b8 2024-11-20T19:25:36,943 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebbeda9c43a2459eae601f5f27731bd5 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/ebbeda9c43a2459eae601f5f27731bd5 2024-11-20T19:25:36,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4377eb601e854ad6ba02b219e0b1bc88 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4377eb601e854ad6ba02b219e0b1bc88 2024-11-20T19:25:36,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa284dc78094e0b845de0ac8ede097b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa284dc78094e0b845de0ac8ede097b 2024-11-20T19:25:36,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6e65cbebfdf04bc28448d53afe1a3fb5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6e65cbebfdf04bc28448d53afe1a3fb5 2024-11-20T19:25:36,947 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/90c3fc467a6f42ce90d10a765ba689ee to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/90c3fc467a6f42ce90d10a765ba689ee 2024-11-20T19:25:36,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a67b3d5dd29d4b549fbdfc30a9841a2f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a67b3d5dd29d4b549fbdfc30a9841a2f 2024-11-20T19:25:36,949 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a0754508617141dda2fbf8d19eea3471 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a0754508617141dda2fbf8d19eea3471 2024-11-20T19:25:36,949 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/09223248a6014eaf95ffe0aee435cd7d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/09223248a6014eaf95ffe0aee435cd7d 2024-11-20T19:25:36,950 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a2b3e025809b49699ce90c6e4560bdbb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a2b3e025809b49699ce90c6e4560bdbb 2024-11-20T19:25:36,951 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6a00b4073584455a8912074b2a99392a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6a00b4073584455a8912074b2a99392a 2024-11-20T19:25:36,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/d6e89f21aa8c46f8924c007e78865034 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/d6e89f21aa8c46f8924c007e78865034 2024-11-20T19:25:36,953 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/61170bc9f09d443ca8ab11fe5875b5d8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/61170bc9f09d443ca8ab11fe5875b5d8 2024-11-20T19:25:36,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a01f64e164cb40eeaa8c74477289dc45 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a01f64e164cb40eeaa8c74477289dc45 2024-11-20T19:25:36,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/31fd5bc6508041498fa075869c167fc0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/31fd5bc6508041498fa075869c167fc0 2024-11-20T19:25:36,955 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd02e6000d1c49c2ad38b4f29858c646 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd02e6000d1c49c2ad38b4f29858c646 2024-11-20T19:25:36,956 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6498bc87a3e94d2e9d04ed1d249806f9 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/6498bc87a3e94d2e9d04ed1d249806f9 2024-11-20T19:25:36,957 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a20e2591c21543c2a624d0fe9d69b8fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a20e2591c21543c2a624d0fe9d69b8fa 2024-11-20T19:25:36,958 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c32dd38b02b94f3eb09a2491c91775f5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c32dd38b02b94f3eb09a2491c91775f5 2024-11-20T19:25:36,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a438e011128d485785f4facf067aae91 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/a438e011128d485785f4facf067aae91 2024-11-20T19:25:36,960 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/060efee4bd654115962aad0215b44ceb to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/060efee4bd654115962aad0215b44ceb 2024-11-20T19:25:36,960 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa2c6fb84c046e9a28843f1d704aca2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/efa2c6fb84c046e9a28843f1d704aca2 2024-11-20T19:25:36,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4b6ac13db7044002a605f08de335c68c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/4b6ac13db7044002a605f08de335c68c 2024-11-20T19:25:36,962 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83e37b5dfc234602af11333c5d53c17a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83e37b5dfc234602af11333c5d53c17a 2024-11-20T19:25:36,963 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd770c9cfa284a8ebebe13b3b66b6aea to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/dd770c9cfa284a8ebebe13b3b66b6aea 2024-11-20T19:25:36,964 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c59179a637624909b3191dba8bc2553a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/c59179a637624909b3191dba8bc2553a 2024-11-20T19:25:36,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b79f78470eb544058f7fd776bfc7d0c3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0831a4766f7940a59b87e488b702d465, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/3c8bf4ddc1da446d8bcb2190b320fd45, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/e04c465e3df34a028e72beedcb454471, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/c26b68ce67944b9089384198c9b94144, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/2d5b2a92bbd44986bded953ef221511a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/42c825f9438c44c684779d0e0b2112f0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/63b0de508eaa4adb93ec808979afcb97, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f18c0867ca26447283c5fb342fbf0d12, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/86bf254f55d342eb8a6b39b9687152ef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f921390337fc4f2a99bfed868907987d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/9c7dfb291861485daca55953fcc2aada, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7fd49b0c099b406b98498afbcbc9f805, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1df738cbd3a4458c9e0be393e5f1676a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/747d36516f1e44c5b0ea9e9d6eb04721, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/8bd3e668903543e2911621be4733a7f1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/fa39bb52015c419bb3bdcfa6b2b1874c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b2d1bbbf672d49de90aa960a5c90a4c8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0739eb89942041038cf577ac58a58534, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/95f96acd99764117a2b5405d0abdfdf0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/cccceabb177b47a5a861ec8fae197555, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/651b0ebb088646479bb78ccf9a7d4ebb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/a92fe3702ef7462396ba344973c03e08, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/60617ec004f44e9381c87efc81e67f04, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f237f1feb5d34588ad6f9e938589b57c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7678e5a91dce4be183e4bfc0ec7a26a2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/120fe93aa76d43d9aab3cb67f1dd81b5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1149352d72bc46448592bf360be4d32e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/36ab8712d9b340bbae6d9d94256a1c69] to archive 2024-11-20T19:25:36,966 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:25:36,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b79f78470eb544058f7fd776bfc7d0c3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b79f78470eb544058f7fd776bfc7d0c3 2024-11-20T19:25:36,968 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0831a4766f7940a59b87e488b702d465 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0831a4766f7940a59b87e488b702d465 2024-11-20T19:25:36,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/3c8bf4ddc1da446d8bcb2190b320fd45 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/3c8bf4ddc1da446d8bcb2190b320fd45 2024-11-20T19:25:36,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/e04c465e3df34a028e72beedcb454471 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/e04c465e3df34a028e72beedcb454471 2024-11-20T19:25:36,971 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/c26b68ce67944b9089384198c9b94144 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/c26b68ce67944b9089384198c9b94144 2024-11-20T19:25:36,972 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/2d5b2a92bbd44986bded953ef221511a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/2d5b2a92bbd44986bded953ef221511a 2024-11-20T19:25:36,973 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/42c825f9438c44c684779d0e0b2112f0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/42c825f9438c44c684779d0e0b2112f0 2024-11-20T19:25:36,974 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/63b0de508eaa4adb93ec808979afcb97 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/63b0de508eaa4adb93ec808979afcb97 2024-11-20T19:25:36,976 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f18c0867ca26447283c5fb342fbf0d12 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f18c0867ca26447283c5fb342fbf0d12 2024-11-20T19:25:36,977 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/86bf254f55d342eb8a6b39b9687152ef to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/86bf254f55d342eb8a6b39b9687152ef 2024-11-20T19:25:36,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f921390337fc4f2a99bfed868907987d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f921390337fc4f2a99bfed868907987d 2024-11-20T19:25:36,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/9c7dfb291861485daca55953fcc2aada to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/9c7dfb291861485daca55953fcc2aada 2024-11-20T19:25:36,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7fd49b0c099b406b98498afbcbc9f805 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7fd49b0c099b406b98498afbcbc9f805 2024-11-20T19:25:36,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1df738cbd3a4458c9e0be393e5f1676a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1df738cbd3a4458c9e0be393e5f1676a 2024-11-20T19:25:36,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/747d36516f1e44c5b0ea9e9d6eb04721 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/747d36516f1e44c5b0ea9e9d6eb04721 2024-11-20T19:25:36,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/8bd3e668903543e2911621be4733a7f1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/8bd3e668903543e2911621be4733a7f1 2024-11-20T19:25:36,987 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/fa39bb52015c419bb3bdcfa6b2b1874c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/fa39bb52015c419bb3bdcfa6b2b1874c 2024-11-20T19:25:36,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b2d1bbbf672d49de90aa960a5c90a4c8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/b2d1bbbf672d49de90aa960a5c90a4c8 2024-11-20T19:25:36,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0739eb89942041038cf577ac58a58534 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/0739eb89942041038cf577ac58a58534 2024-11-20T19:25:36,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/95f96acd99764117a2b5405d0abdfdf0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/95f96acd99764117a2b5405d0abdfdf0 2024-11-20T19:25:36,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/cccceabb177b47a5a861ec8fae197555 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/cccceabb177b47a5a861ec8fae197555 2024-11-20T19:25:36,993 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/651b0ebb088646479bb78ccf9a7d4ebb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/651b0ebb088646479bb78ccf9a7d4ebb 2024-11-20T19:25:36,994 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/a92fe3702ef7462396ba344973c03e08 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/a92fe3702ef7462396ba344973c03e08 2024-11-20T19:25:36,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/60617ec004f44e9381c87efc81e67f04 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/60617ec004f44e9381c87efc81e67f04 2024-11-20T19:25:36,996 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f237f1feb5d34588ad6f9e938589b57c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/f237f1feb5d34588ad6f9e938589b57c 2024-11-20T19:25:36,997 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7678e5a91dce4be183e4bfc0ec7a26a2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/7678e5a91dce4be183e4bfc0ec7a26a2 2024-11-20T19:25:36,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/120fe93aa76d43d9aab3cb67f1dd81b5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/120fe93aa76d43d9aab3cb67f1dd81b5 2024-11-20T19:25:37,000 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1149352d72bc46448592bf360be4d32e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/1149352d72bc46448592bf360be4d32e 2024-11-20T19:25:37,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/36ab8712d9b340bbae6d9d94256a1c69 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/36ab8712d9b340bbae6d9d94256a1c69 2024-11-20T19:25:37,006 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/recovered.edits/474.seqid, newMaxSeqId=474, maxSeqId=1 2024-11-20T19:25:37,009 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da. 2024-11-20T19:25:37,009 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1635): Region close journal for 6aad06303ed006b601a1faa1a93ab5da: 2024-11-20T19:25:37,011 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(170): Closed 6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:37,011 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=6aad06303ed006b601a1faa1a93ab5da, regionState=CLOSED 2024-11-20T19:25:37,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-20T19:25:37,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseRegionProcedure 6aad06303ed006b601a1faa1a93ab5da, server=db9c3a6c6492,35979,1732130703276 in 1.1780 sec 2024-11-20T19:25:37,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=27 2024-11-20T19:25:37,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=27, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6aad06303ed006b601a1faa1a93ab5da, UNASSIGN in 1.1830 sec 2024-11-20T19:25:37,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-20T19:25:37,020 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1880 sec 2024-11-20T19:25:37,021 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130737020"}]},"ts":"1732130737020"} 2024-11-20T19:25:37,022 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:25:37,083 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:25:37,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2690 sec 2024-11-20T19:25:37,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-20T19:25:37,927 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-20T19:25:37,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 
delete TestAcidGuarantees 2024-11-20T19:25:37,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,935 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,937 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=30, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:37,940 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:37,944 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/recovered.edits] 2024-11-20T19:25:37,958 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/7df70146346d452fa2cec89f5862c3af to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/7df70146346d452fa2cec89f5862c3af 2024-11-20T19:25:37,960 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/8f8d70de05da4e778be5950b1c7cd433 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/8f8d70de05da4e778be5950b1c7cd433 2024-11-20T19:25:37,961 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/938aec4005fb437c97ab85d7dcfc46b8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/938aec4005fb437c97ab85d7dcfc46b8 2024-11-20T19:25:37,964 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bf14b3e316b6450cb9e45f6fc59d52df to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/A/bf14b3e316b6450cb9e45f6fc59d52df 2024-11-20T19:25:37,968 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/2928e7f704f444ea957c81f58a249e05 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/2928e7f704f444ea957c81f58a249e05 2024-11-20T19:25:37,971 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83ee91fc0e204eb9ac184410622a004e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/83ee91fc0e204eb9ac184410622a004e 2024-11-20T19:25:37,973 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8aecf8fcba61427198df99775a77df72 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/8aecf8fcba61427198df99775a77df72 2024-11-20T19:25:37,975 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/e3bda1449f334f3296645ffb4623c00f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/B/e3bda1449f334f3296645ffb4623c00f 2024-11-20T19:25:37,978 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/011162a4ed524b6e83f692f42397d553 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/011162a4ed524b6e83f692f42397d553 2024-11-20T19:25:37,979 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/48ed8de062af4dbba874d437e0ddc08e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/48ed8de062af4dbba874d437e0ddc08e 2024-11-20T19:25:37,981 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/4e39f6590ad44320b646ab2bc21f3ded to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/4e39f6590ad44320b646ab2bc21f3ded 2024-11-20T19:25:37,982 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/ca4b9902550642c08ad3fd73df5f939f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/C/ca4b9902550642c08ad3fd73df5f939f 2024-11-20T19:25:37,985 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/recovered.edits/474.seqid to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da/recovered.edits/474.seqid 2024-11-20T19:25:37,985 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/6aad06303ed006b601a1faa1a93ab5da 2024-11-20T19:25:37,986 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:25:37,991 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=30, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:37,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-20T19:25:37,997 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:25:38,024 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:25:38,025 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=30, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:38,026 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:25:38,026 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130738026"}]},"ts":"9223372036854775807"} 2024-11-20T19:25:38,029 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:25:38,029 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6aad06303ed006b601a1faa1a93ab5da, NAME => 'TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:25:38,029 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
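[The DeleteTableProcedure entries above show the master archiving the region's A/B/C store files and recovered.edits before removing the region directory. The DISABLE (pid=26) and DELETE (pid=30) operations were driven from the test client through the HBase Admin API; a minimal client-side sketch of that disable-then-delete sequence, assuming a plain HBase 2.x connection rather than the test's own utility code, could look like the following. Class name and quorum setting are illustrative only.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Mini-cluster ZooKeeper quorum as seen elsewhere in this log; adjust for a real cluster.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1:50476");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(table)) {
                    // A table must be disabled before it can be deleted; these two calls
                    // correspond to the DISABLE and DELETE procedures recorded above.
                    if (admin.isTableEnabled(table)) {
                        admin.disableTable(table);
                    }
                    admin.deleteTable(table);
                }
            }
        }
    }
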
2024-11-20T19:25:38,029 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130738029"}]},"ts":"9223372036854775807"} 2024-11-20T19:25:38,032 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:25:38,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:38,045 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=30, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:38,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 113 msec 2024-11-20T19:25:38,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-20T19:25:38,240 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-20T19:25:38,253 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=238 (was 219) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;db9c3a6c6492:35979-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-907484729_22 at /127.0.0.1:35666 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=543 (was 196) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3716 (was 4642) 2024-11-20T19:25:38,264 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=238, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=543, ProcessCount=11, AvailableMemoryMB=3715 2024-11-20T19:25:38,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:25:38,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:38,269 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:25:38,269 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:38,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 31 2024-11-20T19:25:38,270 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:25:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-11-20T19:25:38,277 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741938_1114 (size=963) 2024-11-20T19:25:38,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-11-20T19:25:38,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-11-20T19:25:38,682 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:25:38,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741939_1115 (size=53) 2024-11-20T19:25:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-11-20T19:25:38,971 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-20T19:25:38,973 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-20T19:25:39,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:39,095 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 77d20aeb75d4c61c8417aef439da7cbf, disabling compactions & flushes 2024-11-20T19:25:39,096 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:39,096 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
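[The create request logged above asks for table 'TestAcidGuarantees' with the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three single-version families A, B and C. The test builds this descriptor through its own helpers, so the sketch below is only an equivalent formulation using the standard HBase 2.x client API; the class and method names are illustrative.]

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CreateAcidTable {
        static void create(Admin admin) throws java.io.IOException {
            // ADAPTIVE in-memory compaction requested as table metadata, matching
            // the TABLE_ATTRIBUTES => {METADATA => ...} form shown in the log.
            TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)      // VERSIONS => '1'
                    .setBlocksize(65536)    // BLOCKSIZE => '65536'
                    .build();
                builder.setColumnFamily(cf);
            }
            TableDescriptor desc = builder.build();
            // Drives the CreateTableProcedure (pid=31) seen in the entries above.
            admin.createTable(desc);
        }
    }
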
2024-11-20T19:25:39,096 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. after waiting 0 ms 2024-11-20T19:25:39,096 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:39,096 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:39,096 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:39,098 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:25:39,098 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130739098"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130739098"}]},"ts":"1732130739098"} 2024-11-20T19:25:39,101 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:25:39,102 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:25:39,103 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130739102"}]},"ts":"1732130739102"} 2024-11-20T19:25:39,105 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:25:39,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, ASSIGN}] 2024-11-20T19:25:39,159 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, ASSIGN 2024-11-20T19:25:39,160 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:25:39,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:39,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:39,376 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-11-20T19:25:39,466 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:39,472 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:39,472 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:39,473 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,473 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:39,473 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,473 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,476 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,479 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:39,479 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77d20aeb75d4c61c8417aef439da7cbf columnFamilyName A 2024-11-20T19:25:39,479 DEBUG [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:39,480 INFO 
[StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(327): Store=77d20aeb75d4c61c8417aef439da7cbf/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:39,480 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,482 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:39,483 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77d20aeb75d4c61c8417aef439da7cbf columnFamilyName B 2024-11-20T19:25:39,483 DEBUG [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:39,484 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(327): Store=77d20aeb75d4c61c8417aef439da7cbf/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:39,484 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,486 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:39,486 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77d20aeb75d4c61c8417aef439da7cbf columnFamilyName C 2024-11-20T19:25:39,486 DEBUG [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:39,487 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(327): Store=77d20aeb75d4c61c8417aef439da7cbf/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:39,487 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:39,488 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,489 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,491 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:39,492 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:39,495 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:25:39,496 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 77d20aeb75d4c61c8417aef439da7cbf; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73683550, jitterRate=0.09797045588493347}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:39,497 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:39,498 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., pid=33, masterSystemTime=1732130739466 2024-11-20T19:25:39,500 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
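[The store-open entries above show that the table-level ADAPTIVE setting reaches each family as a CompactingMemStore with compactor=ADAPTIVE. For reference, the same policy can also be requested per column family instead of via table metadata; a small sketch under that assumption (not what this test does) is shown below.]

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AdaptiveFamily {
        // Builds a family descriptor that asks for ADAPTIVE in-memory compaction
        // on this family only, rather than table-wide.
        static ColumnFamilyDescriptor adaptive(String name) {
            return ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(name))
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .setMaxVersions(1)
                .build();
        }
    }
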
2024-11-20T19:25:39,500 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:39,500 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:39,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-20T19:25:39,504 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 in 189 msec 2024-11-20T19:25:39,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-11-20T19:25:39,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, ASSIGN in 345 msec 2024-11-20T19:25:39,506 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:25:39,507 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130739506"}]},"ts":"1732130739506"} 2024-11-20T19:25:39,508 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:25:39,520 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:25:39,522 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2530 sec 2024-11-20T19:25:40,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-11-20T19:25:40,377 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 31 completed 2024-11-20T19:25:40,379 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04506927 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a9b9802 2024-11-20T19:25:40,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@118b007e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:40,431 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:40,433 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60118, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:40,435 DEBUG [Time-limited test {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:25:40,437 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:25:40,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:25:40,443 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:25:40,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T19:25:40,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741940_1116 (size=999) 2024-11-20T19:25:40,867 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T19:25:40,867 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T19:25:40,873 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:25:40,886 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, REOPEN/MOVE}] 2024-11-20T19:25:40,887 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, REOPEN/MOVE 2024-11-20T19:25:40,889 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:40,891 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:25:40,891 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:41,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,045 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,046 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:25:41,046 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 77d20aeb75d4c61c8417aef439da7cbf, disabling compactions & flushes 2024-11-20T19:25:41,046 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,046 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,046 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. after waiting 0 ms 2024-11-20T19:25:41,046 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:41,077 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T19:25:41,080 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,080 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:41,081 WARN [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionServer(3786): Not adding moved region record: 77d20aeb75d4c61c8417aef439da7cbf to self. 2024-11-20T19:25:41,084 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=CLOSED 2024-11-20T19:25:41,085 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-20T19:25:41,087 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 in 194 msec 2024-11-20T19:25:41,090 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, REOPEN/MOVE; state=CLOSED, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=true 2024-11-20T19:25:41,240 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=36, state=RUNNABLE; OpenRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:25:41,401 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,406 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:41,406 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7285): Opening region: {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:25:41,406 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,406 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:25:41,407 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7327): checking encryption for 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,407 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7330): checking classloading for 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,412 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,413 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:41,420 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77d20aeb75d4c61c8417aef439da7cbf columnFamilyName A 2024-11-20T19:25:41,431 DEBUG [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:41,432 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(327): Store=77d20aeb75d4c61c8417aef439da7cbf/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:41,433 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,434 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:41,434 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77d20aeb75d4c61c8417aef439da7cbf columnFamilyName B 2024-11-20T19:25:41,434 DEBUG [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:41,435 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(327): Store=77d20aeb75d4c61c8417aef439da7cbf/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:41,435 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,443 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:25:41,443 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77d20aeb75d4c61c8417aef439da7cbf columnFamilyName C 2024-11-20T19:25:41,443 DEBUG [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:41,444 INFO [StoreOpener-77d20aeb75d4c61c8417aef439da7cbf-1 {}] regionserver.HStore(327): Store=77d20aeb75d4c61c8417aef439da7cbf/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:25:41,444 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,447 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,448 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,450 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:25:41,452 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1085): writing seq id for 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,457 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1102): Opened 77d20aeb75d4c61c8417aef439da7cbf; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61773415, jitterRate=-0.07950438559055328}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:25:41,460 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1001): Region open journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:41,462 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., pid=38, masterSystemTime=1732130741401 2024-11-20T19:25:41,464 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=OPEN, openSeqNum=5, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,465 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,465 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:41,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=36 2024-11-20T19:25:41,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-20T19:25:41,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, REOPEN/MOVE in 581 msec 2024-11-20T19:25:41,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=36, state=SUCCESS; OpenRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 in 223 msec 2024-11-20T19:25:41,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-20T19:25:41,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 602 msec 2024-11-20T19:25:41,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 1.0310 sec 2024-11-20T19:25:41,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-20T19:25:41,496 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7362d978 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cae6c5c 2024-11-20T19:25:41,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c7d6279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,577 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7bad2e85 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c820ef9 2024-11-20T19:25:41,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b55744e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,603 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-11-20T19:25:41,619 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@454f1431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,621 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x19a533a3 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@42e904d8 2024-11-20T19:25:41,637 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,639 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x465dc764 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4c53ed 2024-11-20T19:25:41,653 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,656 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-11-20T19:25:41,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,680 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-11-20T19:25:41,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-11-20T19:25:41,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,723 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-11-20T19:25:41,742 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:25:41,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:41,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees 2024-11-20T19:25:41,762 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:41,763 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:41,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:41,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T19:25:41,788 DEBUG [hconnection-0x1cce6155-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,789 DEBUG [hconnection-0x6ddea86a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,790 DEBUG [hconnection-0x58a91590-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,791 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60128, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,793 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60130, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,798 DEBUG [hconnection-0x33b01b16-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,801 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:41,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:41,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:41,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:41,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:41,826 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:41,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T19:25:41,875 DEBUG [hconnection-0x5b37bb17-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,876 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,880 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,885 DEBUG [hconnection-0x615bfe4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,886 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,894 DEBUG [hconnection-0x762ecd21-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,896 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,903 DEBUG [hconnection-0x5b607ff4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,904 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130801906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130801908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,917 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130801911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130801909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:41,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-11-20T19:25:41,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130801913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:41,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:41,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:41,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:41,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:41,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:41,939 DEBUG [hconnection-0x3006e540-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:25:41,940 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:25:41,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207cc3d712328c4759a030a44d34a03b2e_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130741810/Put/seqid=0 2024-11-20T19:25:42,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130802020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130802020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130802020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130802020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,026 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130802021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741941_1117 (size=12154) 2024-11-20T19:25:42,037 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:42,043 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207cc3d712328c4759a030a44d34a03b2e_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207cc3d712328c4759a030a44d34a03b2e_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:42,045 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7cfb627e481d400684668d96f4847444, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:42,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7cfb627e481d400684668d96f4847444 is 175, key is test_row_0/A:col10/1732130741810/Put/seqid=0 2024-11-20T19:25:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T19:25:42,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-11-20T19:25:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:42,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:42,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741942_1118 (size=30955) 2024-11-20T19:25:42,100 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7cfb627e481d400684668d96f4847444 2024-11-20T19:25:42,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/faad00aa7fd3495497cd5cf53189af8e is 50, key is test_row_0/B:col10/1732130741810/Put/seqid=0 2024-11-20T19:25:42,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741943_1119 (size=12001) 2024-11-20T19:25:42,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/faad00aa7fd3495497cd5cf53189af8e 2024-11-20T19:25:42,225 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-11-20T19:25:42,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:42,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130802224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130802228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130802224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130802234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130802234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/edbee67de31446e5a03fd8cbb2c7aad7 is 50, key is test_row_0/C:col10/1732130741810/Put/seqid=0 2024-11-20T19:25:42,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T19:25:42,380 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-11-20T19:25:42,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:42,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:42,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741944_1120 (size=12001) 2024-11-20T19:25:42,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/edbee67de31446e5a03fd8cbb2c7aad7 2024-11-20T19:25:42,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7cfb627e481d400684668d96f4847444 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444 2024-11-20T19:25:42,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T19:25:42,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/faad00aa7fd3495497cd5cf53189af8e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/faad00aa7fd3495497cd5cf53189af8e 2024-11-20T19:25:42,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/faad00aa7fd3495497cd5cf53189af8e, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:25:42,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/edbee67de31446e5a03fd8cbb2c7aad7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/edbee67de31446e5a03fd8cbb2c7aad7 2024-11-20T19:25:42,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/edbee67de31446e5a03fd8cbb2c7aad7, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:25:42,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 77d20aeb75d4c61c8417aef439da7cbf in 628ms, sequenceid=15, compaction requested=false 2024-11-20T19:25:42,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:42,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-11-20T19:25:42,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:42,535 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:42,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:42,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:42,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130802547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130802550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130802551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130802551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130802552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,583 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:25:42,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120abbc7e03755048f8adb9f4fa710a28c9_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130741899/Put/seqid=0 2024-11-20T19:25:42,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130802655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130802659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130802659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130802654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:25:42,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130802664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:25:42,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741945_1121 (size=12154)
2024-11-20T19:25:42,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:25:42,699 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120abbc7e03755048f8adb9f4fa710a28c9_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abbc7e03755048f8adb9f4fa710a28c9_77d20aeb75d4c61c8417aef439da7cbf
2024-11-20T19:25:42,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/f2221adc63dd4633b32dd28e36beb848, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf]
2024-11-20T19:25:42,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/f2221adc63dd4633b32dd28e36beb848 is 175, key is test_row_0/A:col10/1732130741899/Put/seqid=0
2024-11-20T19:25:42,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741946_1122 (size=30955)
2024-11-20T19:25:42,767 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/f2221adc63dd4633b32dd28e36beb848
2024-11-20T19:25:42,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/aa5e109e38324612b6ff714774b3cc22 is 50, key is test_row_0/B:col10/1732130741899/Put/seqid=0
2024-11-20T19:25:42,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:25:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130802861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:25:42,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130802864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130802865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130802868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741947_1123 (size=12001) 2024-11-20T19:25:42,870 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/aa5e109e38324612b6ff714774b3cc22 2024-11-20T19:25:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T19:25:42,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130802873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:42,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b5ae243079d9451e849d22236aab9f38 is 50, key is test_row_0/C:col10/1732130741899/Put/seqid=0 2024-11-20T19:25:42,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741948_1124 (size=12001) 2024-11-20T19:25:42,969 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b5ae243079d9451e849d22236aab9f38 2024-11-20T19:25:42,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/f2221adc63dd4633b32dd28e36beb848 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848 2024-11-20T19:25:42,983 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848, entries=150, sequenceid=41, filesize=30.2 K 2024-11-20T19:25:42,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/aa5e109e38324612b6ff714774b3cc22 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aa5e109e38324612b6ff714774b3cc22 2024-11-20T19:25:42,993 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 
{event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aa5e109e38324612b6ff714774b3cc22, entries=150, sequenceid=41, filesize=11.7 K
2024-11-20T19:25:42,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b5ae243079d9451e849d22236aab9f38 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b5ae243079d9451e849d22236aab9f38
2024-11-20T19:25:43,002 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b5ae243079d9451e849d22236aab9f38, entries=150, sequenceid=41, filesize=11.7 K
2024-11-20T19:25:43,003 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 77d20aeb75d4c61c8417aef439da7cbf in 468ms, sequenceid=41, compaction requested=false
2024-11-20T19:25:43,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf:
2024-11-20T19:25:43,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.
2024-11-20T19:25:43,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40
2024-11-20T19:25:43,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=40
2024-11-20T19:25:43,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39
2024-11-20T19:25:43,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2410 sec
2024-11-20T19:25:43,016 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees in 1.2570 sec
2024-11-20T19:25:43,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-20T19:25:43,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A
2024-11-20T19:25:43,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:43,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B
2024-11-20T19:25:43,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:43,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C
2024-11-20T19:25:43,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf
2024-11-20T19:25:43,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130803209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016554ed1f210488d80dd94a15ddc9ad9_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:43,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130803212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130803216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130803217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130803217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741949_1125 (size=14594) 2024-11-20T19:25:43,275 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:43,280 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112016554ed1f210488d80dd94a15ddc9ad9_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016554ed1f210488d80dd94a15ddc9ad9_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:43,282 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6f18b099c6424c5dae4f2efb19051c1e, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:43,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6f18b099c6424c5dae4f2efb19051c1e is 175, key is test_row_0/A:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:43,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130803319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130803328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130803331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130803334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741950_1126 (size=39549) 2024-11-20T19:25:43,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130803334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,336 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6f18b099c6424c5dae4f2efb19051c1e 2024-11-20T19:25:43,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7 is 50, key is test_row_0/B:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:43,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741951_1127 (size=12001) 2024-11-20T19:25:43,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130803532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130803535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130803538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130803539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130803538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130803841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130803842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130803846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130803847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:43,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130803848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:43,860 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7 2024-11-20T19:25:43,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-11-20T19:25:43,872 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-11-20T19:25:43,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:43,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-11-20T19:25:43,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T19:25:43,885 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:43,886 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-20T19:25:43,886 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:43,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1cf83c47679146d891f1656b4058cc26 is 50, key is test_row_0/C:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:43,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741952_1128 (size=12001) 2024-11-20T19:25:43,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1cf83c47679146d891f1656b4058cc26 2024-11-20T19:25:43,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T19:25:43,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6f18b099c6424c5dae4f2efb19051c1e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e 2024-11-20T19:25:43,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e, entries=200, sequenceid=53, filesize=38.6 K 2024-11-20T19:25:43,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7 2024-11-20T19:25:44,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T19:25:44,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1cf83c47679146d891f1656b4058cc26 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1cf83c47679146d891f1656b4058cc26 2024-11-20T19:25:44,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1cf83c47679146d891f1656b4058cc26, entries=150, sequenceid=53, filesize=11.7 K 2024-11-20T19:25:44,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 77d20aeb75d4c61c8417aef439da7cbf in 841ms, sequenceid=53, compaction requested=true 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:44,009 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:44,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:44,009 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:44,010 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:44,010 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:44,010 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:44,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:44,011 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:44,011 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:44,011 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=99.1 K 2024-11-20T19:25:44,011 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/faad00aa7fd3495497cd5cf53189af8e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aa5e109e38324612b6ff714774b3cc22, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=35.2 K 2024-11-20T19:25:44,011 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:44,011 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e] 2024-11-20T19:25:44,011 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cfb627e481d400684668d96f4847444, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130741810 2024-11-20T19:25:44,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting faad00aa7fd3495497cd5cf53189af8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130741810 2024-11-20T19:25:44,012 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting aa5e109e38324612b6ff714774b3cc22, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130741888 2024-11-20T19:25:44,012 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2221adc63dd4633b32dd28e36beb848, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130741888 2024-11-20T19:25:44,012 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e8f7bb9f7aaf4e808c6aa2e9f5483cd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130742549 2024-11-20T19:25:44,012 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f18b099c6424c5dae4f2efb19051c1e, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130742549 2024-11-20T19:25:44,032 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#108 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:44,033 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/363dc233dcd74010b54cacdcac3c7d9b is 50, key is test_row_0/B:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:44,038 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-11-20T19:25:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:44,039 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:44,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:44,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:44,049 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:44,101 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e7475f443dea451a84b9151c3d1695f9_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:44,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fa2360a2f41e4461bbbb1a4e9e9169fe_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130743212/Put/seqid=0 2024-11-20T19:25:44,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T19:25:44,190 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e7475f443dea451a84b9151c3d1695f9_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:44,190 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e7475f443dea451a84b9151c3d1695f9_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A 
region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:44,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741953_1129 (size=12104) 2024-11-20T19:25:44,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741955_1131 (size=4469) 2024-11-20T19:25:44,296 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#109 average throughput is 0.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:44,298 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/084f0550723e462ab016d7cb1a5aa0a0 is 175, key is test_row_0/A:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:44,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741954_1130 (size=12154) 2024-11-20T19:25:44,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:44,324 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fa2360a2f41e4461bbbb1a4e9e9169fe_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fa2360a2f41e4461bbbb1a4e9e9169fe_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/55ddb1f378a6451fabd1191b5b7a05b0, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:44,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/55ddb1f378a6451fabd1191b5b7a05b0 is 175, key is test_row_0/A:col10/1732130743212/Put/seqid=0 2024-11-20T19:25:44,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
as already flushing 2024-11-20T19:25:44,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:44,372 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130804359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130804367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130804371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130804372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130804372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741957_1133 (size=30955) 2024-11-20T19:25:44,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741956_1132 (size=31058) 2024-11-20T19:25:44,411 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/084f0550723e462ab016d7cb1a5aa0a0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/084f0550723e462ab016d7cb1a5aa0a0 2024-11-20T19:25:44,424 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into 084f0550723e462ab016d7cb1a5aa0a0(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:44,424 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:44,424 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130744009; duration=0sec 2024-11-20T19:25:44,424 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:44,424 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:44,424 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:44,426 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:44,427 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:44,427 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:44,427 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/edbee67de31446e5a03fd8cbb2c7aad7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b5ae243079d9451e849d22236aab9f38, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1cf83c47679146d891f1656b4058cc26] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=35.2 K 2024-11-20T19:25:44,427 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting edbee67de31446e5a03fd8cbb2c7aad7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130741810 2024-11-20T19:25:44,428 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5ae243079d9451e849d22236aab9f38, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130741888 2024-11-20T19:25:44,428 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cf83c47679146d891f1656b4058cc26, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130742549 2024-11-20T19:25:44,451 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#111 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:44,451 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/2df2a93ba3794462b80e8998feb78254 is 50, key is test_row_0/C:col10/1732130742549/Put/seqid=0 2024-11-20T19:25:44,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130804474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130804478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130804478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130804478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130804479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T19:25:44,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741958_1134 (size=12104) 2024-11-20T19:25:44,514 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/2df2a93ba3794462b80e8998feb78254 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/2df2a93ba3794462b80e8998feb78254 2024-11-20T19:25:44,523 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into 2df2a93ba3794462b80e8998feb78254(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:44,523 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:44,523 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130744009; duration=0sec 2024-11-20T19:25:44,524 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:44,524 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:44,605 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/363dc233dcd74010b54cacdcac3c7d9b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/363dc233dcd74010b54cacdcac3c7d9b 2024-11-20T19:25:44,618 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into 363dc233dcd74010b54cacdcac3c7d9b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:44,618 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:44,618 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130744009; duration=0sec 2024-11-20T19:25:44,618 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:44,619 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:44,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130804680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130804682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130804683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130804683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130804685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,787 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/55ddb1f378a6451fabd1191b5b7a05b0 2024-11-20T19:25:44,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/5ce8f28dd4124264bad7b8e10615f383 is 50, key is test_row_0/B:col10/1732130743212/Put/seqid=0 2024-11-20T19:25:44,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741959_1135 (size=12001) 2024-11-20T19:25:44,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T19:25:44,988 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130804986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130804986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130804990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130804993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:44,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:44,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130804993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,256 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/5ce8f28dd4124264bad7b8e10615f383 2024-11-20T19:25:45,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/df69777038eb495f94270a1713ad36e3 is 50, key is test_row_0/C:col10/1732130743212/Put/seqid=0 2024-11-20T19:25:45,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741960_1136 (size=12001) 2024-11-20T19:25:45,322 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/df69777038eb495f94270a1713ad36e3 2024-11-20T19:25:45,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/55ddb1f378a6451fabd1191b5b7a05b0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0 2024-11-20T19:25:45,351 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0, entries=150, sequenceid=78, filesize=30.2 K 2024-11-20T19:25:45,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/5ce8f28dd4124264bad7b8e10615f383 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5ce8f28dd4124264bad7b8e10615f383 2024-11-20T19:25:45,360 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5ce8f28dd4124264bad7b8e10615f383, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T19:25:45,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/df69777038eb495f94270a1713ad36e3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/df69777038eb495f94270a1713ad36e3 2024-11-20T19:25:45,369 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/df69777038eb495f94270a1713ad36e3, entries=150, sequenceid=78, filesize=11.7 K 2024-11-20T19:25:45,370 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 77d20aeb75d4c61c8417aef439da7cbf in 1331ms, sequenceid=78, compaction requested=false 2024-11-20T19:25:45,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:45,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:45,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-20T19:25:45,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-11-20T19:25:45,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-20T19:25:45,384 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4860 sec 2024-11-20T19:25:45,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 1.5090 sec 2024-11-20T19:25:45,500 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:45,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:45,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:45,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:45,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:45,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:45,501 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:45,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:45,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200b320ea72ac148aaa5e83afb44f25fe6_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:45,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741961_1137 (size=14594) 2024-11-20T19:25:45,589 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:45,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130805570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130805575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130805586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130805591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130805591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,603 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200b320ea72ac148aaa5e83afb44f25fe6_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b320ea72ac148aaa5e83afb44f25fe6_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:45,604 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2648aa553dcd4707bce642d3b7ce4e86, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:45,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2648aa553dcd4707bce642d3b7ce4e86 is 175, key is test_row_0/A:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:45,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741962_1138 (size=39549) 2024-11-20T19:25:45,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130805694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130805697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130805699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130805701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130805701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130805906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130805906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130805907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130805908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:45,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130805910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-11-20T19:25:45,990 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-11-20T19:25:45,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-11-20T19:25:45,995 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:45,997 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:45,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T19:25:46,050 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2648aa553dcd4707bce642d3b7ce4e86 2024-11-20T19:25:46,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/aadb8b89094144099e064bb81abb1241 is 50, key is test_row_0/B:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:46,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T19:25:46,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741963_1139 (size=12001) 2024-11-20T19:25:46,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/aadb8b89094144099e064bb81abb1241 2024-11-20T19:25:46,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/64220ae1816d4a2385ee996524f2899b is 50, key is test_row_0/C:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:46,152 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:46,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:46,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741964_1140 (size=12001) 2024-11-20T19:25:46,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/64220ae1816d4a2385ee996524f2899b 2024-11-20T19:25:46,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2648aa553dcd4707bce642d3b7ce4e86 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86 2024-11-20T19:25:46,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86, entries=200, sequenceid=93, filesize=38.6 K 2024-11-20T19:25:46,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/aadb8b89094144099e064bb81abb1241 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aadb8b89094144099e064bb81abb1241 2024-11-20T19:25:46,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aadb8b89094144099e064bb81abb1241, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T19:25:46,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/64220ae1816d4a2385ee996524f2899b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/64220ae1816d4a2385ee996524f2899b 2024-11-20T19:25:46,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/64220ae1816d4a2385ee996524f2899b, entries=150, sequenceid=93, filesize=11.7 K 2024-11-20T19:25:46,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 77d20aeb75d4c61c8417aef439da7cbf in 722ms, sequenceid=93, compaction requested=true 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:46,227 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:46,227 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:46,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:46,229 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:46,229 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction 
(all files) 2024-11-20T19:25:46,229 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,229 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/363dc233dcd74010b54cacdcac3c7d9b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5ce8f28dd4124264bad7b8e10615f383, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aadb8b89094144099e064bb81abb1241] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=35.3 K 2024-11-20T19:25:46,230 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:46,230 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 363dc233dcd74010b54cacdcac3c7d9b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130742549 2024-11-20T19:25:46,230 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:46,230 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,230 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/084f0550723e462ab016d7cb1a5aa0a0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=99.2 K 2024-11-20T19:25:46,230 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,230 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/084f0550723e462ab016d7cb1a5aa0a0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86] 2024-11-20T19:25:46,231 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ce8f28dd4124264bad7b8e10615f383, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732130743198 2024-11-20T19:25:46,231 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 084f0550723e462ab016d7cb1a5aa0a0, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130742549 2024-11-20T19:25:46,231 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting aadb8b89094144099e064bb81abb1241, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732130744357 2024-11-20T19:25:46,231 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55ddb1f378a6451fabd1191b5b7a05b0, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732130743198 2024-11-20T19:25:46,232 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2648aa553dcd4707bce642d3b7ce4e86, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732130744357 2024-11-20T19:25:46,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:46,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:46,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:46,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:46,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:46,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:46,255 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:46,268 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
77d20aeb75d4c61c8417aef439da7cbf#B#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:46,268 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/787cee67c14a4ba2b0af9b7a1a49cd24 is 50, key is test_row_0/B:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:46,279 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130806265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130806267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130806268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130806269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130806281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,297 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d0f8ba5469c04152bbd9c0b162613643_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T19:25:46,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112076c0c63ec84b4385bf5fa8c6cc91e951_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130746241/Put/seqid=0 2024-11-20T19:25:46,307 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d0f8ba5469c04152bbd9c0b162613643_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:46,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,307 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d0f8ba5469c04152bbd9c0b162613643_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:46,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:46,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
as already flushing 2024-11-20T19:25:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741965_1141 (size=12207) 2024-11-20T19:25:46,353 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/787cee67c14a4ba2b0af9b7a1a49cd24 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/787cee67c14a4ba2b0af9b7a1a49cd24 2024-11-20T19:25:46,360 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into 787cee67c14a4ba2b0af9b7a1a49cd24(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:46,360 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:46,360 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130746227; duration=0sec 2024-11-20T19:25:46,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:46,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:46,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:46,362 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:46,363 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:46,363 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,363 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/2df2a93ba3794462b80e8998feb78254, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/df69777038eb495f94270a1713ad36e3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/64220ae1816d4a2385ee996524f2899b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=35.3 K 2024-11-20T19:25:46,363 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2df2a93ba3794462b80e8998feb78254, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732130742549 2024-11-20T19:25:46,364 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting df69777038eb495f94270a1713ad36e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732130743198 2024-11-20T19:25:46,364 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 64220ae1816d4a2385ee996524f2899b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732130744357 2024-11-20T19:25:46,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is 
added to blk_1073741966_1142 (size=19474) 2024-11-20T19:25:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130806381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130806383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130806383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130806392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130806399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741967_1143 (size=4469) 2024-11-20T19:25:46,419 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#120 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:46,419 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e1bf5517961b4cd7add97897780f4b8e is 50, key is test_row_0/C:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:46,462 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:46,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:46,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741968_1144 (size=12207) 2024-11-20T19:25:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,476 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e1bf5517961b4cd7add97897780f4b8e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e1bf5517961b4cd7add97897780f4b8e 2024-11-20T19:25:46,484 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into e1bf5517961b4cd7add97897780f4b8e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:46,484 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:46,484 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130746227; duration=0sec 2024-11-20T19:25:46,484 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:46,484 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130806590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130806590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T19:25:46,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130806599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130806601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,618 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:46,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:46,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130806606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,772 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
as already flushing 2024-11-20T19:25:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,787 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:46,795 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112076c0c63ec84b4385bf5fa8c6cc91e951_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112076c0c63ec84b4385bf5fa8c6cc91e951_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:46,796 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d14125ddda1040fa95e21dc6e6442a64, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:46,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d14125ddda1040fa95e21dc6e6442a64 is 175, key is test_row_0/A:col10/1732130746241/Put/seqid=0 2024-11-20T19:25:46,814 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#117 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:46,815 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/eebe6f49b5144d0cb8c8caa685ae23e1 is 175, key is test_row_0/A:col10/1732130744363/Put/seqid=0 2024-11-20T19:25:46,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741969_1145 (size=56733) 2024-11-20T19:25:46,833 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d14125ddda1040fa95e21dc6e6442a64 2024-11-20T19:25:46,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/77772554cda24d82beb6dca6dd2b4d37 is 50, key is test_row_0/B:col10/1732130746241/Put/seqid=0 2024-11-20T19:25:46,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130806897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130806897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741970_1146 (size=31161) 2024-11-20T19:25:46,909 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/eebe6f49b5144d0cb8c8caa685ae23e1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/eebe6f49b5144d0cb8c8caa685ae23e1 2024-11-20T19:25:46,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130806907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130806910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,925 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:46,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:46,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:46,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:46,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:46,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:46,929 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into eebe6f49b5144d0cb8c8caa685ae23e1(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:46,930 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:46,930 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130746227; duration=0sec 2024-11-20T19:25:46,930 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:46,930 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:46,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:46,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130806927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:46,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741971_1147 (size=12001) 2024-11-20T19:25:46,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/77772554cda24d82beb6dca6dd2b4d37 2024-11-20T19:25:46,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/736622f00f89484a9e23913911f0db1f is 50, key is test_row_0/C:col10/1732130746241/Put/seqid=0 2024-11-20T19:25:46,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741972_1148 (size=12001) 2024-11-20T19:25:46,985 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/736622f00f89484a9e23913911f0db1f 2024-11-20T19:25:46,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d14125ddda1040fa95e21dc6e6442a64 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64 2024-11-20T19:25:47,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64, entries=300, sequenceid=120, filesize=55.4 K 2024-11-20T19:25:47,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/77772554cda24d82beb6dca6dd2b4d37 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/77772554cda24d82beb6dca6dd2b4d37 2024-11-20T19:25:47,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/77772554cda24d82beb6dca6dd2b4d37, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T19:25:47,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/736622f00f89484a9e23913911f0db1f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/736622f00f89484a9e23913911f0db1f 2024-11-20T19:25:47,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/736622f00f89484a9e23913911f0db1f, entries=150, sequenceid=120, filesize=11.7 K 2024-11-20T19:25:47,021 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 77d20aeb75d4c61c8417aef439da7cbf in 779ms, sequenceid=120, compaction requested=false 2024-11-20T19:25:47,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:47,078 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-20T19:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:47,079 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:47,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:47,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T19:25:47,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ea730ebcfe7f4926a9296a97621f2ee5_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130746267/Put/seqid=0 2024-11-20T19:25:47,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741973_1149 (size=12204) 2024-11-20T19:25:47,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:47,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ea730ebcfe7f4926a9296a97621f2ee5_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ea730ebcfe7f4926a9296a97621f2ee5_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:47,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/12fd047c5f4c465dbfb1a4cad9c308d4, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:47,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/12fd047c5f4c465dbfb1a4cad9c308d4 is 175, key is test_row_0/A:col10/1732130746267/Put/seqid=0 2024-11-20T19:25:47,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741974_1150 (size=31005) 2024-11-20T19:25:47,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:47,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:47,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130807474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130807476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130807480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130807484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130807486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130807586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130807588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130807591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130807599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130807599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,643 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/12fd047c5f4c465dbfb1a4cad9c308d4 2024-11-20T19:25:47,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/87d90c8ac68241fcb426f156136d33be is 50, key is test_row_0/B:col10/1732130746267/Put/seqid=0 2024-11-20T19:25:47,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741975_1151 (size=12051) 2024-11-20T19:25:47,696 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/87d90c8ac68241fcb426f156136d33be 2024-11-20T19:25:47,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/818a4407e8c847659382f06069e89e77 is 50, key is test_row_0/C:col10/1732130746267/Put/seqid=0 2024-11-20T19:25:47,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741976_1152 (size=12051) 2024-11-20T19:25:47,748 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/818a4407e8c847659382f06069e89e77 2024-11-20T19:25:47,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/12fd047c5f4c465dbfb1a4cad9c308d4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4 2024-11-20T19:25:47,773 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4, entries=150, sequenceid=133, filesize=30.3 K 2024-11-20T19:25:47,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/87d90c8ac68241fcb426f156136d33be as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/87d90c8ac68241fcb426f156136d33be 2024-11-20T19:25:47,783 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/87d90c8ac68241fcb426f156136d33be, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T19:25:47,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/818a4407e8c847659382f06069e89e77 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/818a4407e8c847659382f06069e89e77 2024-11-20T19:25:47,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130807793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130807793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130807799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,809 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/818a4407e8c847659382f06069e89e77, entries=150, sequenceid=133, filesize=11.8 K 2024-11-20T19:25:47,811 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 77d20aeb75d4c61c8417aef439da7cbf in 731ms, sequenceid=133, compaction requested=true 2024-11-20T19:25:47,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:47,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:47,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-11-20T19:25:47,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-11-20T19:25:47,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-20T19:25:47,816 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8170 sec 2024-11-20T19:25:47,820 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.8250 sec 2024-11-20T19:25:47,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:47,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:25:47,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:47,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:47,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:47,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:47,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130807847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130807848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204fa80d4392f6492e8bc501a96d837230_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:47,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741977_1153 (size=14794) 2024-11-20T19:25:47,908 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:47,915 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204fa80d4392f6492e8bc501a96d837230_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204fa80d4392f6492e8bc501a96d837230_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:47,916 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/dc2018e14ff741269aee7a961a5ff050, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:47,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/dc2018e14ff741269aee7a961a5ff050 is 175, key is test_row_0/A:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:47,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741978_1154 (size=39749) 2024-11-20T19:25:47,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130807955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:47,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130807956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:47,964 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/dc2018e14ff741269aee7a961a5ff050 2024-11-20T19:25:47,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/4a49c332ee1740ff925ccbc2246eaf4a is 50, key is test_row_0/B:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:48,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741979_1155 (size=12151) 2024-11-20T19:25:48,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/4a49c332ee1740ff925ccbc2246eaf4a 2024-11-20T19:25:48,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/5b35d29d1ce54e8faa979949a09659d2 is 50, key is test_row_0/C:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:48,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741980_1156 (size=12151) 2024-11-20T19:25:48,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/5b35d29d1ce54e8faa979949a09659d2 2024-11-20T19:25:48,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/dc2018e14ff741269aee7a961a5ff050 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050 2024-11-20T19:25:48,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-20T19:25:48,105 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-20T19:25:48,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:48,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130808102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-20T19:25:48,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:48,110 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:48,110 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-20T19:25:48,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:48,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050, entries=200, sequenceid=161, filesize=38.8 K 2024-11-20T19:25:48,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/4a49c332ee1740ff925ccbc2246eaf4a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4a49c332ee1740ff925ccbc2246eaf4a 2024-11-20T19:25:48,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4a49c332ee1740ff925ccbc2246eaf4a, entries=150, sequenceid=161, filesize=11.9 K 2024-11-20T19:25:48,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/5b35d29d1ce54e8faa979949a09659d2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/5b35d29d1ce54e8faa979949a09659d2 2024-11-20T19:25:48,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130808112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130808113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/5b35d29d1ce54e8faa979949a09659d2, entries=150, sequenceid=161, filesize=11.9 K 2024-11-20T19:25:48,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 77d20aeb75d4c61c8417aef439da7cbf in 321ms, sequenceid=161, compaction requested=true 2024-11-20T19:25:48,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:48,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:48,145 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:48,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:48,145 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:48,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:48,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:48,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:48,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:48,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,151 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 158648 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:48,151 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:48,151 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:48,151 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/eebe6f49b5144d0cb8c8caa685ae23e1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=154.9 K 2024-11-20T19:25:48,151 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,152 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/eebe6f49b5144d0cb8c8caa685ae23e1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050] 2024-11-20T19:25:48,152 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting eebe6f49b5144d0cb8c8caa685ae23e1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732130744357 2024-11-20T19:25:48,153 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d14125ddda1040fa95e21dc6e6442a64, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732130745536 2024-11-20T19:25:48,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,155 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12fd047c5f4c465dbfb1a4cad9c308d4, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130746265 2024-11-20T19:25:48,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,156 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc2018e14ff741269aee7a961a5ff050, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732130747470 2024-11-20T19:25:48,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,160 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:48,161 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:48,161 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,161 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/787cee67c14a4ba2b0af9b7a1a49cd24, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/77772554cda24d82beb6dca6dd2b4d37, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/87d90c8ac68241fcb426f156136d33be, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4a49c332ee1740ff925ccbc2246eaf4a] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=47.3 K 2024-11-20T19:25:48,162 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 787cee67c14a4ba2b0af9b7a1a49cd24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732130744357 2024-11-20T19:25:48,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,162 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 77772554cda24d82beb6dca6dd2b4d37, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732130745585 2024-11-20T19:25:48,163 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 
87d90c8ac68241fcb426f156136d33be, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130746265 2024-11-20T19:25:48,164 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a49c332ee1740ff925ccbc2246eaf4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732130747472 2024-11-20T19:25:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,184 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:48,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,188 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,201 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:48,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,211 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#130 average throughput is 0.26 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:48,212 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/d95b52449de34698a7ad0394525c8bc0 is 50, key is test_row_0/B:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:48,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,217 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112097343874baaa4e90a51ae5bdc3b78ce4_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:48,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:48,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:48,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:48,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:48,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:48,236 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:48,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:48,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:48,236 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112097343874baaa4e90a51ae5bdc3b78ce4_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:48,237 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112097343874baaa4e90a51ae5bdc3b78ce4_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:48,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:48,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:48,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741981_1157 (size=12493) 2024-11-20T19:25:48,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:48,271 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/d95b52449de34698a7ad0394525c8bc0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/d95b52449de34698a7ad0394525c8bc0 2024-11-20T19:25:48,282 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into d95b52449de34698a7ad0394525c8bc0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:48,282 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:48,282 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=12, startTime=1732130748145; duration=0sec 2024-11-20T19:25:48,282 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:48,282 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:48,282 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:48,289 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48410 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:48,290 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:48,290 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:48,290 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e1bf5517961b4cd7add97897780f4b8e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/736622f00f89484a9e23913911f0db1f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/818a4407e8c847659382f06069e89e77, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/5b35d29d1ce54e8faa979949a09659d2] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=47.3 K 2024-11-20T19:25:48,290 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e1bf5517961b4cd7add97897780f4b8e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732130744357 2024-11-20T19:25:48,291 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 736622f00f89484a9e23913911f0db1f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1732130745585 2024-11-20T19:25:48,291 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 818a4407e8c847659382f06069e89e77, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732130746265 2024-11-20T19:25:48,291 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b35d29d1ce54e8faa979949a09659d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732130747472 2024-11-20T19:25:48,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741982_1158 (size=4469) 2024-11-20T19:25:48,326 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#129 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:48,327 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2ef22921e9644e1dbfc6b43123f78e98 is 175, key is test_row_0/A:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:48,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120539b4fb865dd4533ac72e2d94d89775b_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130748232/Put/seqid=0 2024-11-20T19:25:48,350 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#132 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:48,351 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/76c3bb2ef2fc4e60ab01485c73f49f69 is 50, key is test_row_0/C:col10/1732130747483/Put/seqid=0 2024-11-20T19:25:48,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741983_1159 (size=31447) 2024-11-20T19:25:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:48,412 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2ef22921e9644e1dbfc6b43123f78e98 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2ef22921e9644e1dbfc6b43123f78e98 2024-11-20T19:25:48,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:48,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:48,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,419 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into 2ef22921e9644e1dbfc6b43123f78e98(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:48,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:48,419 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=12, startTime=1732130748145; duration=0sec 2024-11-20T19:25:48,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:48,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:48,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741985_1161 (size=12493) 2024-11-20T19:25:48,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741984_1160 (size=19774) 2024-11-20T19:25:48,448 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:48,449 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/76c3bb2ef2fc4e60ab01485c73f49f69 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/76c3bb2ef2fc4e60ab01485c73f49f69 2024-11-20T19:25:48,462 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120539b4fb865dd4533ac72e2d94d89775b_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120539b4fb865dd4533ac72e2d94d89775b_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:48,462 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into 76c3bb2ef2fc4e60ab01485c73f49f69(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:48,463 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:48,463 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=12, startTime=1732130748146; duration=0sec 2024-11-20T19:25:48,463 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:48,463 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:48,465 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/60b3e79ab452429587f4cee84e8a7cdf, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:48,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/60b3e79ab452429587f4cee84e8a7cdf is 175, key is test_row_0/A:col10/1732130748232/Put/seqid=0 2024-11-20T19:25:48,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130808493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130808499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741986_1162 (size=57033) 2024-11-20T19:25:48,569 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:48,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:48,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:48,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130808602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130808607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130808614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130808640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130808646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:48,724 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:48,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:48,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:48,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130808816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130808816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:48,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:48,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:48,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:48,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:48,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:48,920 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/60b3e79ab452429587f4cee84e8a7cdf 2024-11-20T19:25:48,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/6ca2956fd69140e2aebdf51edefabcd1 is 50, key is test_row_0/B:col10/1732130748232/Put/seqid=0 2024-11-20T19:25:49,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741987_1163 (size=12151) 2024-11-20T19:25:49,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/6ca2956fd69140e2aebdf51edefabcd1 2024-11-20T19:25:49,032 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4b285f7f84dc47e1b166a51465d800ba is 50, key is test_row_0/C:col10/1732130748232/Put/seqid=0 2024-11-20T19:25:49,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741988_1164 (size=12151) 2024-11-20T19:25:49,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130809123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130809125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:49,221 INFO [master/db9c3a6c6492:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-20T19:25:49,222 INFO [master/db9c3a6c6492:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-20T19:25:49,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,473 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4b285f7f84dc47e1b166a51465d800ba 2024-11-20T19:25:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/60b3e79ab452429587f4cee84e8a7cdf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf 2024-11-20T19:25:49,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf, entries=300, sequenceid=173, filesize=55.7 K 2024-11-20T19:25:49,491 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/6ca2956fd69140e2aebdf51edefabcd1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/6ca2956fd69140e2aebdf51edefabcd1 2024-11-20T19:25:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/6ca2956fd69140e2aebdf51edefabcd1, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T19:25:49,502 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4b285f7f84dc47e1b166a51465d800ba as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4b285f7f84dc47e1b166a51465d800ba 2024-11-20T19:25:49,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4b285f7f84dc47e1b166a51465d800ba, entries=150, sequenceid=173, filesize=11.9 K 2024-11-20T19:25:49,510 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 77d20aeb75d4c61c8417aef439da7cbf in 1276ms, sequenceid=173, compaction requested=false 2024-11-20T19:25:49,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:49,626 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:49,627 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:49,627 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:49,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:49,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:49,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:49,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:49,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:49,645 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a090710bd346452b944e359ec3304091_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:49,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130809667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130809667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130809667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130809670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130809668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741989_1165 (size=12304) 2024-11-20T19:25:49,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130809783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130809786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130809787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130809790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130809791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,798 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:49,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,963 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:49,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:49,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:49,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:49,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130809991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130809992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130809995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:49,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:49,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130809995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130809996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,115 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:50,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:50,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,137 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:50,142 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a090710bd346452b944e359ec3304091_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a090710bd346452b944e359ec3304091_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:50,144 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d93bc65c53d7436e9eefa99dfb69b6fb, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:50,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d93bc65c53d7436e9eefa99dfb69b6fb is 175, key is test_row_0/A:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:50,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741990_1166 (size=31105) 2024-11-20T19:25:50,171 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=49.2 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d93bc65c53d7436e9eefa99dfb69b6fb 2024-11-20T19:25:50,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/1abf032d111a4135bfb166cd927358ad is 50, key is test_row_0/B:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:50,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:50,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741991_1167 (size=12151) 2024-11-20T19:25:50,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/1abf032d111a4135bfb166cd927358ad 2024-11-20T19:25:50,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/85396a2be66f44dab5f45d2d9066f063 is 50, key is test_row_0/C:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:50,269 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:50,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:50,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130810297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130810298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130810301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130810302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130810303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741992_1168 (size=12151) 2024-11-20T19:25:50,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/85396a2be66f44dab5f45d2d9066f063 2024-11-20T19:25:50,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/d93bc65c53d7436e9eefa99dfb69b6fb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb 2024-11-20T19:25:50,327 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb, entries=150, sequenceid=201, filesize=30.4 K 2024-11-20T19:25:50,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/1abf032d111a4135bfb166cd927358ad as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/1abf032d111a4135bfb166cd927358ad 2024-11-20T19:25:50,338 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/1abf032d111a4135bfb166cd927358ad, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T19:25:50,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/85396a2be66f44dab5f45d2d9066f063 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/85396a2be66f44dab5f45d2d9066f063 2024-11-20T19:25:50,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/85396a2be66f44dab5f45d2d9066f063, entries=150, sequenceid=201, filesize=11.9 K 2024-11-20T19:25:50,350 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 77d20aeb75d4c61c8417aef439da7cbf in 724ms, sequenceid=201, compaction requested=true 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:50,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T19:25:50,351 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:50,351 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:50,353 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:50,353 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] 
regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:50,353 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:50,353 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:50,353 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,353 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,353 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2ef22921e9644e1dbfc6b43123f78e98, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=116.8 K 2024-11-20T19:25:50,353 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/76c3bb2ef2fc4e60ab01485c73f49f69, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4b285f7f84dc47e1b166a51465d800ba, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/85396a2be66f44dab5f45d2d9066f063] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=35.9 K 2024-11-20T19:25:50,353 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:50,353 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2ef22921e9644e1dbfc6b43123f78e98, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb] 2024-11-20T19:25:50,354 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 76c3bb2ef2fc4e60ab01485c73f49f69, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732130747472 2024-11-20T19:25:50,354 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ef22921e9644e1dbfc6b43123f78e98, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732130747472 2024-11-20T19:25:50,355 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60b3e79ab452429587f4cee84e8a7cdf, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732130747838 2024-11-20T19:25:50,355 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b285f7f84dc47e1b166a51465d800ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732130748186 2024-11-20T19:25:50,355 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d93bc65c53d7436e9eefa99dfb69b6fb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130748470 2024-11-20T19:25:50,356 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 85396a2be66f44dab5f45d2d9066f063, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130748470 2024-11-20T19:25:50,381 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:50,396 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#139 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:50,397 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4296083ef8b34351b4b8ee9ebfa9ebeb is 50, key is test_row_0/C:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:50,403 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a212e0fb092b496a8a8e11a69efa20bc_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:50,405 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a212e0fb092b496a8a8e11a69efa20bc_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:50,406 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a212e0fb092b496a8a8e11a69efa20bc_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:50,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-20T19:25:50,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
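Note: pid=46 above is the master re-dispatching a region flush to the region server as a FlushRegionCallable procedure. For reference only, a minimal client-side sketch of requesting a flush of the same table through the public Admin API is shown below; the table name is taken from the log, while the configuration and connection handling are illustrative assumptions, not part of this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Ask the cluster to flush every region of the table; in this log the
      // server-side work for such a request shows up as FlushRegionCallable procedures.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}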
2024-11-20T19:25:50,433 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:25:50,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:50,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:50,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:50,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:50,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:50,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:50,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741993_1169 (size=12595) 2024-11-20T19:25:50,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741994_1170 (size=4469) 2024-11-20T19:25:50,488 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#138 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:50,488 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/4ad7e95a95634e12a374cf7f4fe975c6 is 175, key is test_row_0/A:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:50,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c318ddc2b54c44f3b0572e02283e6c13_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130749666/Put/seqid=0 2024-11-20T19:25:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741995_1171 (size=31549) 2024-11-20T19:25:50,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741996_1172 (size=12304) 2024-11-20T19:25:50,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:50,574 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c318ddc2b54c44f3b0572e02283e6c13_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c318ddc2b54c44f3b0572e02283e6c13_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:50,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/40f35ebe77ee42f2852d3d64cb898707, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:50,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/40f35ebe77ee42f2852d3d64cb898707 is 175, key is test_row_0/A:col10/1732130749666/Put/seqid=0 2024-11-20T19:25:50,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741997_1173 (size=31105) 2024-11-20T19:25:50,613 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/40f35ebe77ee42f2852d3d64cb898707 2024-11-20T19:25:50,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/3eff0e3944d4415aa0afe18d3847d05c is 50, key is test_row_0/B:col10/1732130749666/Put/seqid=0 2024-11-20T19:25:50,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741998_1174 (size=12151) 2024-11-20T19:25:50,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:50,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:50,873 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4296083ef8b34351b4b8ee9ebfa9ebeb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4296083ef8b34351b4b8ee9ebfa9ebeb 2024-11-20T19:25:50,885 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into 4296083ef8b34351b4b8ee9ebfa9ebeb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:50,885 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:50,885 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130750351; duration=0sec 2024-11-20T19:25:50,885 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:50,885 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:50,885 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:50,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130810861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130810865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130810866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,888 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:50,890 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:50,891 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
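Note: the long- and short-compaction threads above each select 3 eligible store files with ExploringCompactionPolicy before starting a minor compaction. The knobs that drive that selection are ordinary configuration properties; the sketch below sets them programmatically purely for illustration, using the stock default values rather than whatever overrides this test applies.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered
    // (the selections in this log fire as soon as 3 files are eligible).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may merge.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used by ExploringCompactionPolicy when weighing candidate files.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}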
2024-11-20T19:25:50,892 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/d95b52449de34698a7ad0394525c8bc0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/6ca2956fd69140e2aebdf51edefabcd1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/1abf032d111a4135bfb166cd927358ad] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=35.9 K 2024-11-20T19:25:50,893 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d95b52449de34698a7ad0394525c8bc0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732130747472 2024-11-20T19:25:50,894 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ca2956fd69140e2aebdf51edefabcd1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732130748186 2024-11-20T19:25:50,894 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1abf032d111a4135bfb166cd927358ad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130748470 2024-11-20T19:25:50,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130810887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130810887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,918 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#142 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:50,925 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/70ca66ec0c8f43b487a703bc65278c1c is 50, key is test_row_0/B:col10/1732130748470/Put/seqid=0 2024-11-20T19:25:50,964 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/4ad7e95a95634e12a374cf7f4fe975c6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/4ad7e95a95634e12a374cf7f4fe975c6 2024-11-20T19:25:50,971 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into 4ad7e95a95634e12a374cf7f4fe975c6(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:50,971 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:50,971 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130750351; duration=0sec 2024-11-20T19:25:50,972 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:50,972 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:50,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741999_1175 (size=12595) 2024-11-20T19:25:50,982 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/70ca66ec0c8f43b487a703bc65278c1c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/70ca66ec0c8f43b487a703bc65278c1c 2024-11-20T19:25:50,990 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into 70ca66ec0c8f43b487a703bc65278c1c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
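Note: interleaved with the compaction output, the RPC handlers keep rejecting Mutate calls with RegionTooBusyException because the region's memstore has reached its blocking limit (512.0 K under this test's configuration; the limit is derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier). A minimal, illustrative sketch of a writer that backs off and retries on that exception follows; the table, row, and column names are taken from the log, but the retry policy is an assumption, not what TestAcidGuarantees actually does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);            // rejected with RegionTooBusyException while the
          break;                     // memstore is over its blocking limit
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);   // simple exponential backoff between attempts
          backoffMs *= 2;
        }
      }
    }
  }
}

The standard HBase client normally retries this exception on its own (it is not a DoNotRetryIOException); the explicit loop here only makes the failure mode visible.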
2024-11-20T19:25:50,990 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:50,991 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130750351; duration=0sec 2024-11-20T19:25:50,991 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:50,991 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:50,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130810989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130810990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:50,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:50,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130810990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130810999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130811001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,044 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/3eff0e3944d4415aa0afe18d3847d05c 2024-11-20T19:25:51,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1ce09834e8964ceabfd1921f04d912fa is 50, key is test_row_0/C:col10/1732130749666/Put/seqid=0 2024-11-20T19:25:51,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742000_1176 (size=12151) 2024-11-20T19:25:51,142 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1ce09834e8964ceabfd1921f04d912fa 2024-11-20T19:25:51,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/40f35ebe77ee42f2852d3d64cb898707 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707 2024-11-20T19:25:51,155 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707, entries=150, sequenceid=214, filesize=30.4 K 2024-11-20T19:25:51,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/3eff0e3944d4415aa0afe18d3847d05c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/3eff0e3944d4415aa0afe18d3847d05c 2024-11-20T19:25:51,167 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/3eff0e3944d4415aa0afe18d3847d05c, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T19:25:51,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1ce09834e8964ceabfd1921f04d912fa as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1ce09834e8964ceabfd1921f04d912fa 2024-11-20T19:25:51,175 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1ce09834e8964ceabfd1921f04d912fa, entries=150, sequenceid=214, filesize=11.9 K 2024-11-20T19:25:51,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 77d20aeb75d4c61c8417aef439da7cbf in 744ms, sequenceid=214, compaction requested=false 2024-11-20T19:25:51,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:51,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
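[Editor's note] The repeated RegionTooBusyException warnings in the entries above and below come from HRegion.checkResources: once a region's memstore passes its blocking limit (here 512.0 K, which presumably reflects this test's deliberately small memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, not production defaults), new mutations are rejected until a flush drains the memstore. The following is a minimal, hypothetical client-side sketch of reacting to that backpressure; the table name matches the test, but the putWithBackoff helper is not part of HBase, and the standard HBase client already performs its own internal retries (and may surface this exception wrapped in its retry-exhausted exceptions), so treat this purely as an illustration of backing off when a region reports it is too busy.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    // Hypothetical helper: retry a single put with exponential backoff when the
    // region rejects the write because its memstore is over the blocking limit.
    static void putWithBackoff(Table table, Put put, int maxAttempts) throws Exception {
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                if (attempt >= maxAttempts) {
                    throw e;                          // give up after maxAttempts tries
                }
                Thread.sleep(sleepMs);                // back off so the flush can catch up
                sleepMs = Math.min(sleepMs * 2, 5_000);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 10);
        }
    }
}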
2024-11-20T19:25:51,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-20T19:25:51,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-20T19:25:51,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-20T19:25:51,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0680 sec 2024-11-20T19:25:51,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 3.0760 sec 2024-11-20T19:25:51,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:25:51,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:51,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:51,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130811226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130811227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130811229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130811231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130811233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112096a7ba5a9e9c442f913e4678a5468322_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742001_1177 (size=12304) 2024-11-20T19:25:51,311 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:51,317 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112096a7ba5a9e9c442f913e4678a5468322_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112096a7ba5a9e9c442f913e4678a5468322_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:51,318 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7d4e130a9079438190577153ae72419d, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:51,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7d4e130a9079438190577153ae72419d is 175, key is test_row_0/A:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:51,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130811334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130811337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130811338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130811339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130811344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742002_1178 (size=31105) 2024-11-20T19:25:51,366 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=243, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7d4e130a9079438190577153ae72419d 2024-11-20T19:25:51,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/12df09f6a2b14ebfb22064592597eb99 is 50, key is test_row_0/B:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:51,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742003_1179 (size=12151) 2024-11-20T19:25:51,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130811546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130811546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130811547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130811547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130811554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/12df09f6a2b14ebfb22064592597eb99 2024-11-20T19:25:51,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130811852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130811854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130811856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130811856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130811857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:51,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f021b3338c544e809d06d4f7a5bfa94d is 50, key is test_row_0/C:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:51,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742004_1180 (size=12151) 2024-11-20T19:25:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-20T19:25:52,215 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-20T19:25:52,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-20T19:25:52,221 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:52,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:52,222 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:52,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:52,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f021b3338c544e809d06d4f7a5bfa94d 2024-11-20T19:25:52,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7d4e130a9079438190577153ae72419d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d 2024-11-20T19:25:52,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d, entries=150, sequenceid=243, filesize=30.4 K 2024-11-20T19:25:52,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/12df09f6a2b14ebfb22064592597eb99 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/12df09f6a2b14ebfb22064592597eb99 2024-11-20T19:25:52,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/12df09f6a2b14ebfb22064592597eb99, entries=150, sequenceid=243, filesize=11.9 K 2024-11-20T19:25:52,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f021b3338c544e809d06d4f7a5bfa94d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f021b3338c544e809d06d4f7a5bfa94d 2024-11-20T19:25:52,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f021b3338c544e809d06d4f7a5bfa94d, entries=150, sequenceid=243, filesize=11.9 K 2024-11-20T19:25:52,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 77d20aeb75d4c61c8417aef439da7cbf in 
1165ms, sequenceid=243, compaction requested=true 2024-11-20T19:25:52,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:52,364 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:52,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:52,365 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:52,365 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:52,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:52,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:52,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:52,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:52,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130812359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,366 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:52,366 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:52,367 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,367 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/4ad7e95a95634e12a374cf7f4fe975c6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=91.6 K 2024-11-20T19:25:52,367 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,367 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/4ad7e95a95634e12a374cf7f4fe975c6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d] 2024-11-20T19:25:52,368 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ad7e95a95634e12a374cf7f4fe975c6, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130748470 2024-11-20T19:25:52,369 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:52,369 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:52,369 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,369 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/70ca66ec0c8f43b487a703bc65278c1c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/3eff0e3944d4415aa0afe18d3847d05c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/12df09f6a2b14ebfb22064592597eb99] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.0 K 2024-11-20T19:25:52,369 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40f35ebe77ee42f2852d3d64cb898707, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130749647 2024-11-20T19:25:52,371 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 70ca66ec0c8f43b487a703bc65278c1c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130748470 2024-11-20T19:25:52,371 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d4e130a9079438190577153ae72419d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732130750883 2024-11-20T19:25:52,371 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 3eff0e3944d4415aa0afe18d3847d05c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130749647 2024-11-20T19:25:52,372 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 
12df09f6a2b14ebfb22064592597eb99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732130750883 2024-11-20T19:25:52,374 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:52,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:25:52,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:52,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:52,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
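The repeated "RegionTooBusyException: Over memstore limit=512.0 K" entries above come from HRegion.checkResources blocking writes once the region's memstore passes its blocking threshold, which HBase derives from the per-region flush size multiplied by the block multiplier. A minimal sketch of how a test-sized 512 K threshold could be configured follows; the property names are standard HBase settings, but the specific values are assumptions chosen only to reproduce the limit seen in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Hypothetical test-sized values: blocking limit = flush.size * block.multiplier = 512 K,
            // matching the "Over memstore limit=512.0 K" rejections in the log above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // assumed 128 K flush size
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // assumed multiplier of 4
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("Writes block once a region's memstore exceeds " + blockingLimit + " bytes");
        }
    }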
2024-11-20T19:25:52,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:52,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:52,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:52,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:52,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:52,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:52,393 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:52,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200b8c235f63c4469ba77913c5497050fd_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130752366/Put/seqid=0 2024-11-20T19:25:52,401 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#148 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:52,402 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/64244eea71ee4fa98048da0ef71fdb09 is 50, key is test_row_0/B:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:52,427 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204e170ded3aef4c18b3f9493acbcec6da_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:52,429 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204e170ded3aef4c18b3f9493acbcec6da_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:52,430 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204e170ded3aef4c18b3f9493acbcec6da_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:52,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742005_1181 (size=14794) 2024-11-20T19:25:52,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130812443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130812444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130812452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130812446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742006_1182 (size=12697) 2024-11-20T19:25:52,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742007_1183 (size=4469) 2024-11-20T19:25:52,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:52,528 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:52,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:52,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130812556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130812557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130812558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130812559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:52,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:52,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130812764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130812763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130812765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:52,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130812767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:52,831 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:52,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:52,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:52,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
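While the flush is in progress, the Mutate calls logged above (callId 107 through 136) keep being rejected with RegionTooBusyException until the memstore drains. A hedged client-side sketch of handling that back-pressure is shown below; note that the stock HBase client normally retries these rejections internally before surfacing them, so the explicit backoff loop and the retry counts here are purely illustrative and are not what the test itself does.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                long backoffMs = 100;     // illustrative starting backoff, not a tuned value
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);   // may fail while the region is over its memstore limit
                        break;
                    } catch (IOException e) {
                        // RegionTooBusyException (an IOException subclass) signals transient back-pressure:
                        // wait and retry rather than failing the writer thread outright.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }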
2024-11-20T19:25:52,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,840 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200b8c235f63c4469ba77913c5497050fd_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b8c235f63c4469ba77913c5497050fd_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:52,842 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7561d720f75949619edb9dede270c8c1, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:52,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7561d720f75949619edb9dede270c8c1 is 175, key is test_row_0/A:col10/1732130752366/Put/seqid=0 2024-11-20T19:25:52,882 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/64244eea71ee4fa98048da0ef71fdb09 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/64244eea71ee4fa98048da0ef71fdb09 2024-11-20T19:25:52,892 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
77d20aeb75d4c61c8417aef439da7cbf#A#compaction#147 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:52,892 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/ae3239cfa04c435aa7190cebda3002ab is 175, key is test_row_0/A:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:52,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742008_1184 (size=39749) 2024-11-20T19:25:52,898 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into 64244eea71ee4fa98048da0ef71fdb09(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:52,898 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:52,898 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130752365; duration=0sec 2024-11-20T19:25:52,898 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:52,898 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:52,898 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:52,900 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=256, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7561d720f75949619edb9dede270c8c1 2024-11-20T19:25:52,903 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:52,903 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:52,903 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:52,903 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4296083ef8b34351b4b8ee9ebfa9ebeb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1ce09834e8964ceabfd1921f04d912fa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f021b3338c544e809d06d4f7a5bfa94d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.0 K 2024-11-20T19:25:52,904 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4296083ef8b34351b4b8ee9ebfa9ebeb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732130748470 2024-11-20T19:25:52,904 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ce09834e8964ceabfd1921f04d912fa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1732130749647 2024-11-20T19:25:52,913 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f021b3338c544e809d06d4f7a5bfa94d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732130750883 2024-11-20T19:25:52,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/f8e49ec9144f4f908113698460633165 is 50, key is test_row_0/B:col10/1732130752366/Put/seqid=0 2024-11-20T19:25:52,932 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#151 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:52,933 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/787a550e318e4c4d94ac97bf2d92b9da is 50, key is test_row_0/C:col10/1732130751198/Put/seqid=0 2024-11-20T19:25:52,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742009_1185 (size=31651) 2024-11-20T19:25:52,944 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/ae3239cfa04c435aa7190cebda3002ab as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/ae3239cfa04c435aa7190cebda3002ab 2024-11-20T19:25:52,954 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into ae3239cfa04c435aa7190cebda3002ab(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:52,955 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:52,955 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130752364; duration=0sec 2024-11-20T19:25:52,955 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:52,955 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:52,988 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:52,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:52,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:52,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:52,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:52,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:52,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:53,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742011_1187 (size=12697) 2024-11-20T19:25:53,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742010_1186 (size=12151) 2024-11-20T19:25:53,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/f8e49ec9144f4f908113698460633165 2024-11-20T19:25:53,012 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/787a550e318e4c4d94ac97bf2d92b9da as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/787a550e318e4c4d94ac97bf2d92b9da 2024-11-20T19:25:53,020 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into 787a550e318e4c4d94ac97bf2d92b9da(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:53,020 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:53,020 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130752366; duration=0sec 2024-11-20T19:25:53,020 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:53,020 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:53,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b1cec8baa74b4bd8a2723b0b57b67f2b is 50, key is test_row_0/C:col10/1732130752366/Put/seqid=0 2024-11-20T19:25:53,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130813069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742012_1188 (size=12151) 2024-11-20T19:25:53,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130813071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b1cec8baa74b4bd8a2723b0b57b67f2b 2024-11-20T19:25:53,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130813074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130813074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/7561d720f75949619edb9dede270c8c1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1 2024-11-20T19:25:53,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1, entries=200, sequenceid=256, filesize=38.8 K 2024-11-20T19:25:53,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/f8e49ec9144f4f908113698460633165 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/f8e49ec9144f4f908113698460633165 2024-11-20T19:25:53,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/f8e49ec9144f4f908113698460633165, entries=150, sequenceid=256, filesize=11.9 K 2024-11-20T19:25:53,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b1cec8baa74b4bd8a2723b0b57b67f2b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b1cec8baa74b4bd8a2723b0b57b67f2b 2024-11-20T19:25:53,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b1cec8baa74b4bd8a2723b0b57b67f2b, entries=150, sequenceid=256, filesize=11.9 K 2024-11-20T19:25:53,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 77d20aeb75d4c61c8417aef439da7cbf in 737ms, sequenceid=256, compaction requested=false 2024-11-20T19:25:53,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:53,142 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:53,143 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:53,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dcb85625a1e646edb87101dcc7c6f9d3_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130752443/Put/seqid=0 2024-11-20T19:25:53,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742013_1189 (size=12454) 2024-11-20T19:25:53,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:53,218 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dcb85625a1e646edb87101dcc7c6f9d3_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dcb85625a1e646edb87101dcc7c6f9d3_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:53,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/933660c772684a33af3725316ccdec2d, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:53,219 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/933660c772684a33af3725316ccdec2d is 175, key is test_row_0/A:col10/1732130752443/Put/seqid=0 2024-11-20T19:25:53,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742014_1190 (size=31255) 2024-11-20T19:25:53,256 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/933660c772684a33af3725316ccdec2d 2024-11-20T19:25:53,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/9da041f2653e4f14839f833ccfdf8b9d is 50, key is test_row_0/B:col10/1732130752443/Put/seqid=0 2024-11-20T19:25:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:53,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742015_1191 (size=12301) 2024-11-20T19:25:53,338 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/9da041f2653e4f14839f833ccfdf8b9d 2024-11-20T19:25:53,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b400276b84334ecfa3ae734c01dff52f is 50, key is test_row_0/C:col10/1732130752443/Put/seqid=0 2024-11-20T19:25:53,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
as already flushing 2024-11-20T19:25:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:53,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742016_1192 (size=12301) 2024-11-20T19:25:53,408 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b400276b84334ecfa3ae734c01dff52f 2024-11-20T19:25:53,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/933660c772684a33af3725316ccdec2d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d 2024-11-20T19:25:53,426 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d, entries=150, sequenceid=282, filesize=30.5 K 2024-11-20T19:25:53,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/9da041f2653e4f14839f833ccfdf8b9d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/9da041f2653e4f14839f833ccfdf8b9d 2024-11-20T19:25:53,438 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/9da041f2653e4f14839f833ccfdf8b9d, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T19:25:53,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b400276b84334ecfa3ae734c01dff52f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b400276b84334ecfa3ae734c01dff52f 2024-11-20T19:25:53,449 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b400276b84334ecfa3ae734c01dff52f, entries=150, sequenceid=282, filesize=12.0 K 2024-11-20T19:25:53,452 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=60.38 KB/61830 for 77d20aeb75d4c61c8417aef439da7cbf in 309ms, sequenceid=282, compaction requested=true 2024-11-20T19:25:53,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:53,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:53,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-20T19:25:53,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-20T19:25:53,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-20T19:25:53,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2320 sec 2024-11-20T19:25:53,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.2360 sec 2024-11-20T19:25:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:53,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:25:53,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:53,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:53,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b6af8b78e1e7424d91d513d739fa03cf_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130753449/Put/seqid=0 
2024-11-20T19:25:53,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742017_1193 (size=14994) 2024-11-20T19:25:53,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130813584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130813587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130813587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130813589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130813589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130813692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130813692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130813694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130813695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130813897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130813897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:53,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130813896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130813897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:53,934 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:53,940 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b6af8b78e1e7424d91d513d739fa03cf_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b6af8b78e1e7424d91d513d739fa03cf_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:53,941 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/a723ea11683e43a5a3806ed5e479f4da, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:53,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/a723ea11683e43a5a3806ed5e479f4da is 175, key is test_row_0/A:col10/1732130753449/Put/seqid=0 2024-11-20T19:25:53,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742018_1194 (size=39949) 2024-11-20T19:25:53,974 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/a723ea11683e43a5a3806ed5e479f4da 2024-11-20T19:25:53,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/7bc1beda36f54f988f617b7d0e10a3c9 is 50, key is test_row_0/B:col10/1732130753449/Put/seqid=0 2024-11-20T19:25:54,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742019_1195 (size=12301) 2024-11-20T19:25:54,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/7bc1beda36f54f988f617b7d0e10a3c9 2024-11-20T19:25:54,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e19299755d324f8b895aee525f80ac51 is 50, key is test_row_0/C:col10/1732130753449/Put/seqid=0 2024-11-20T19:25:54,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742020_1196 (size=12301) 2024-11-20T19:25:54,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e19299755d324f8b895aee525f80ac51 2024-11-20T19:25:54,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/a723ea11683e43a5a3806ed5e479f4da as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da 2024-11-20T19:25:54,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da, entries=200, sequenceid=295, filesize=39.0 K 2024-11-20T19:25:54,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/7bc1beda36f54f988f617b7d0e10a3c9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/7bc1beda36f54f988f617b7d0e10a3c9 2024-11-20T19:25:54,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/7bc1beda36f54f988f617b7d0e10a3c9, entries=150, sequenceid=295, filesize=12.0 K 2024-11-20T19:25:54,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e19299755d324f8b895aee525f80ac51 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e19299755d324f8b895aee525f80ac51 2024-11-20T19:25:54,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e19299755d324f8b895aee525f80ac51, entries=150, sequenceid=295, filesize=12.0 K 2024-11-20T19:25:54,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 77d20aeb75d4c61c8417aef439da7cbf in 695ms, sequenceid=295, compaction requested=true 2024-11-20T19:25:54,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:54,162 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:54,163 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:54,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:54,163 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 
store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:54,164 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:54,164 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:54,164 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,165 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/64244eea71ee4fa98048da0ef71fdb09, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/f8e49ec9144f4f908113698460633165, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/9da041f2653e4f14839f833ccfdf8b9d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/7bc1beda36f54f988f617b7d0e10a3c9] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=48.3 K 2024-11-20T19:25:54,165 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142604 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:54,165 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:54,165 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:54,165 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/ae3239cfa04c435aa7190cebda3002ab, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=139.3 K 2024-11-20T19:25:54,165 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,165 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/ae3239cfa04c435aa7190cebda3002ab, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da] 2024-11-20T19:25:54,166 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 64244eea71ee4fa98048da0ef71fdb09, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732130750883 2024-11-20T19:25:54,166 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae3239cfa04c435aa7190cebda3002ab, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732130750883 2024-11-20T19:25:54,166 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f8e49ec9144f4f908113698460633165, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732130751229 2024-11-20T19:25:54,166 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7561d720f75949619edb9dede270c8c1, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732130751229 2024-11-20T19:25:54,166 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9da041f2653e4f14839f833ccfdf8b9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732130752443 2024-11-20T19:25:54,167 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 933660c772684a33af3725316ccdec2d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732130752443 2024-11-20T19:25:54,167 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bc1beda36f54f988f617b7d0e10a3c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130753392 2024-11-20T19:25:54,167 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a723ea11683e43a5a3806ed5e479f4da, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130753392 2024-11-20T19:25:54,183 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#159 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:54,184 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/8c5d74fa59bc45ce94af973e878a8a5e is 50, key is test_row_0/B:col10/1732130753449/Put/seqid=0 2024-11-20T19:25:54,196 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:54,210 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:54,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:54,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:54,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:54,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:54,218 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120e68bc7ffc235419f91a009bab0645b71_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 
2024-11-20T19:25:54,220 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120e68bc7ffc235419f91a009bab0645b71_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:54,221 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e68bc7ffc235419f91a009bab0645b71_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:54,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130814228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130814229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742021_1197 (size=12983) 2024-11-20T19:25:54,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130814236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130814236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,247 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/8c5d74fa59bc45ce94af973e878a8a5e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8c5d74fa59bc45ce94af973e878a8a5e 2024-11-20T19:25:54,256 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into 8c5d74fa59bc45ce94af973e878a8a5e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:54,257 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:54,257 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=12, startTime=1732130754163; duration=0sec 2024-11-20T19:25:54,257 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:54,257 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:54,257 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:25:54,259 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:25:54,259 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:54,259 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,259 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/787a550e318e4c4d94ac97bf2d92b9da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b1cec8baa74b4bd8a2723b0b57b67f2b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b400276b84334ecfa3ae734c01dff52f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e19299755d324f8b895aee525f80ac51] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=48.3 K 2024-11-20T19:25:54,260 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 787a550e318e4c4d94ac97bf2d92b9da, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732130750883 2024-11-20T19:25:54,260 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b1cec8baa74b4bd8a2723b0b57b67f2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1732130751229 2024-11-20T19:25:54,261 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b400276b84334ecfa3ae734c01dff52f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=282, earliestPutTs=1732130752443 2024-11-20T19:25:54,261 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e19299755d324f8b895aee525f80ac51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130753392 2024-11-20T19:25:54,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c76625438472482d9787130cc3148c94_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130754208/Put/seqid=0 2024-11-20T19:25:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742022_1198 (size=4469) 2024-11-20T19:25:54,277 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#160 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:54,278 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/faef370e69dc428eac9a0a5ba1245706 is 175, key is test_row_0/A:col10/1732130753449/Put/seqid=0 2024-11-20T19:25:54,289 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:54,290 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f3768fc1503c4a389c68f2ddbcb5e04f is 50, key is test_row_0/C:col10/1732130753449/Put/seqid=0 2024-11-20T19:25:54,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742024_1200 (size=31937) 2024-11-20T19:25:54,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742023_1199 (size=12454) 2024-11-20T19:25:54,303 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:54,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742025_1201 (size=12983) 2024-11-20T19:25:54,308 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c76625438472482d9787130cc3148c94_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c76625438472482d9787130cc3148c94_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:54,313 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/aa4ce61b89fe4fd6aeaf596263df1318, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:54,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/aa4ce61b89fe4fd6aeaf596263df1318 is 175, key is test_row_0/A:col10/1732130754208/Put/seqid=0 2024-11-20T19:25:54,315 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f3768fc1503c4a389c68f2ddbcb5e04f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f3768fc1503c4a389c68f2ddbcb5e04f 2024-11-20T19:25:54,321 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into f3768fc1503c4a389c68f2ddbcb5e04f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:54,321 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:54,321 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=12, startTime=1732130754163; duration=0sec 2024-11-20T19:25:54,321 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:54,321 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:54,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-20T19:25:54,328 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-20T19:25:54,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:54,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-20T19:25:54,331 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:54,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:54,335 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:54,335 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:54,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742026_1202 (size=31255) 2024-11-20T19:25:54,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130814337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130814337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130814344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130814344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:54,488 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:54,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:54,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130814544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130814545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130814548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130814549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130814594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:54,642 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,712 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/faef370e69dc428eac9a0a5ba1245706 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/faef370e69dc428eac9a0a5ba1245706 2024-11-20T19:25:54,721 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into faef370e69dc428eac9a0a5ba1245706(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:54,721 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:54,721 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=12, startTime=1732130754162; duration=0sec 2024-11-20T19:25:54,721 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:54,721 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:54,738 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=323, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/aa4ce61b89fe4fd6aeaf596263df1318 2024-11-20T19:25:54,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ea5c788f75104f099067d9ce58450ed1 is 50, key is test_row_0/B:col10/1732130754208/Put/seqid=0 2024-11-20T19:25:54,796 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:54,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742027_1203 (size=12301) 2024-11-20T19:25:54,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130814850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130814851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130814854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:54,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130814854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:54,951 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:54,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:54,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:54,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:54,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:54,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,108 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:55,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:55,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:55,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ea5c788f75104f099067d9ce58450ed1 2024-11-20T19:25:55,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1e4bd6892ed340748a7af39818b68370 is 50, key is test_row_0/C:col10/1732130754208/Put/seqid=0 2024-11-20T19:25:55,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742028_1204 (size=12301) 2024-11-20T19:25:55,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:55,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:55,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:55,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1e4bd6892ed340748a7af39818b68370 2024-11-20T19:25:55,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/aa4ce61b89fe4fd6aeaf596263df1318 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318 2024-11-20T19:25:55,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318, entries=150, sequenceid=323, filesize=30.5 K 2024-11-20T19:25:55,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ea5c788f75104f099067d9ce58450ed1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ea5c788f75104f099067d9ce58450ed1 2024-11-20T19:25:55,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ea5c788f75104f099067d9ce58450ed1, entries=150, sequenceid=323, filesize=12.0 K 2024-11-20T19:25:55,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/1e4bd6892ed340748a7af39818b68370 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1e4bd6892ed340748a7af39818b68370 2024-11-20T19:25:55,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1e4bd6892ed340748a7af39818b68370, entries=150, sequenceid=323, filesize=12.0 K 2024-11-20T19:25:55,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 77d20aeb75d4c61c8417aef439da7cbf in 1087ms, sequenceid=323, compaction requested=false 2024-11-20T19:25:55,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:55,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 
77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:55,366 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:55,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:55,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:55,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112040b13c7e3910411ba26de4b97f52eead_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,415 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:55,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:55,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:55,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130815435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742029_1205 (size=14994) 2024-11-20T19:25:55,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130815445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130815447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130815448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,458 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:55,463 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112040b13c7e3910411ba26de4b97f52eead_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112040b13c7e3910411ba26de4b97f52eead_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:55,465 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/05804593f89847bf8090619e98606d23, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:55,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/05804593f89847bf8090619e98606d23 is 175, key is test_row_0/A:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742030_1206 (size=39949) 2024-11-20T19:25:55,503 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=338, 
memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/05804593f89847bf8090619e98606d23 2024-11-20T19:25:55,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/fd351726dad042d3a6dd4d6271369ec4 is 50, key is test_row_0/B:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130815546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130815555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130815555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130815556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742031_1207 (size=12301) 2024-11-20T19:25:55,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/fd351726dad042d3a6dd4d6271369ec4 2024-11-20T19:25:55,569 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:55,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:55,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:55,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4adb29e876a046ccb8f5e16cf0a721a0 is 50, key is test_row_0/C:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742032_1208 (size=12301) 2024-11-20T19:25:55,617 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4adb29e876a046ccb8f5e16cf0a721a0 2024-11-20T19:25:55,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/05804593f89847bf8090619e98606d23 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23 2024-11-20T19:25:55,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23, entries=200, sequenceid=338, filesize=39.0 K 2024-11-20T19:25:55,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/fd351726dad042d3a6dd4d6271369ec4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/fd351726dad042d3a6dd4d6271369ec4 2024-11-20T19:25:55,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/fd351726dad042d3a6dd4d6271369ec4, entries=150, sequenceid=338, filesize=12.0 K 2024-11-20T19:25:55,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/4adb29e876a046ccb8f5e16cf0a721a0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4adb29e876a046ccb8f5e16cf0a721a0 2024-11-20T19:25:55,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4adb29e876a046ccb8f5e16cf0a721a0, entries=150, sequenceid=338, filesize=12.0 K 2024-11-20T19:25:55,646 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 77d20aeb75d4c61c8417aef439da7cbf in 280ms, sequenceid=338, compaction requested=true 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:55,647 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:55,647 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:55,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:55,649 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:55,649 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:55,649 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,649 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8c5d74fa59bc45ce94af973e878a8a5e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ea5c788f75104f099067d9ce58450ed1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/fd351726dad042d3a6dd4d6271369ec4] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.7 K 2024-11-20T19:25:55,649 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:55,649 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:55,649 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,649 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/faef370e69dc428eac9a0a5ba1245706, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=100.7 K 2024-11-20T19:25:55,649 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
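The selection logged above is the region server's own ExploringCompactionPolicy reacting to three HFiles per store after the recent flushes. As a hedged sketch (an assumed helper class, not part of the test harness), equivalent compactions could also be requested explicitly through the Admin API:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactTestTable {
  // Queue compactions for the test table; the server-side policy shown in the log
  // makes the same per-store selection automatically once enough store files accumulate.
  public static void requestCompactions(Admin admin) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    admin.compact(table, Bytes.toBytes("B")); // minor compaction of a single store (family B)
    admin.majorCompact(table);                // or force a major compaction of every store
  }
}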
2024-11-20T19:25:55,649 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/faef370e69dc428eac9a0a5ba1245706, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23] 2024-11-20T19:25:55,651 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c5d74fa59bc45ce94af973e878a8a5e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130753392 2024-11-20T19:25:55,651 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting faef370e69dc428eac9a0a5ba1245706, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130753392 2024-11-20T19:25:55,652 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ea5c788f75104f099067d9ce58450ed1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732130754208 2024-11-20T19:25:55,652 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa4ce61b89fe4fd6aeaf596263df1318, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732130754208 2024-11-20T19:25:55,652 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting fd351726dad042d3a6dd4d6271369ec4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732130754231 2024-11-20T19:25:55,654 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05804593f89847bf8090619e98606d23, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732130754231 2024-11-20T19:25:55,670 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:55,681 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#169 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:55,681 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/e5c8b675f9ac41f08a96970785a55acd is 50, key is test_row_0/B:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,683 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204afdd25d1f7449c18a8730d915b6e673_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:55,685 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204afdd25d1f7449c18a8730d915b6e673_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:55,685 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204afdd25d1f7449c18a8730d915b6e673_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:55,724 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742033_1209 (size=4469) 2024-11-20T19:25:55,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-20T19:25:55,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:55,725 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:25:55,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:55,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:55,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:55,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:55,726 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#168 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:55,726 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2b484cd0afd940b78d1b270ae69c74a4 is 175, key is test_row_0/A:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742034_1210 (size=13085) 2024-11-20T19:25:55,740 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/e5c8b675f9ac41f08a96970785a55acd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e5c8b675f9ac41f08a96970785a55acd 2024-11-20T19:25:55,748 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into e5c8b675f9ac41f08a96970785a55acd(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
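The "Over memstore limit=512.0 K" rejections surrounding these entries come from HRegion.checkResources blocking writes once a region's memstore exceeds the configured flush size multiplied by the block multiplier. A hedged configuration sketch follows; the 128 KB / 4x values are assumptions chosen only so the product matches the 512 K limit reported here, not values read from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimits {
  public static Configuration smallFlushConfig() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (example value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore grows past flush.size * multiplier
    // (128 KB * 4 = 512 KB, matching the limit in the RegionTooBusyException above).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}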
2024-11-20T19:25:55,748 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:55,748 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130755647; duration=0sec 2024-11-20T19:25:55,748 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:55,748 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:55,749 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:55,750 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:55,751 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:55,751 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:55,751 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f3768fc1503c4a389c68f2ddbcb5e04f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1e4bd6892ed340748a7af39818b68370, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4adb29e876a046ccb8f5e16cf0a721a0] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.7 K 2024-11-20T19:25:55,751 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f3768fc1503c4a389c68f2ddbcb5e04f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732130753392 2024-11-20T19:25:55,752 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e4bd6892ed340748a7af39818b68370, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1732130754208 2024-11-20T19:25:55,752 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4adb29e876a046ccb8f5e16cf0a721a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732130754231 2024-11-20T19:25:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush 
requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:55,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:55,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742035_1211 (size=32039) 2024-11-20T19:25:55,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201c5f4185d634424e90bf60f9e58241e3_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130755425/Put/seqid=0 2024-11-20T19:25:55,771 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:55,772 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f4a587672b1a4a8981c217d3e0487761 is 50, key is test_row_0/C:col10/1732130755365/Put/seqid=0 2024-11-20T19:25:55,775 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/2b484cd0afd940b78d1b270ae69c74a4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2b484cd0afd940b78d1b270ae69c74a4 2024-11-20T19:25:55,783 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into 2b484cd0afd940b78d1b270ae69c74a4(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:55,783 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:55,783 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130755647; duration=0sec 2024-11-20T19:25:55,783 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:55,783 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:55,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130815776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130815776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130815778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130815785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742037_1213 (size=13085) 2024-11-20T19:25:55,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742036_1212 (size=12454) 2024-11-20T19:25:55,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130815886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130815886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130815886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:55,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130815896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130816090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130816090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130816091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130816103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,206 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/f4a587672b1a4a8981c217d3e0487761 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f4a587672b1a4a8981c217d3e0487761 2024-11-20T19:25:56,214 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into f4a587672b1a4a8981c217d3e0487761(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:56,214 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:56,214 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130755647; duration=0sec 2024-11-20T19:25:56,214 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:56,214 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:56,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:56,224 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411201c5f4185d634424e90bf60f9e58241e3_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c5f4185d634424e90bf60f9e58241e3_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:56,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/83e9218a69174aea9e7102395190fbec, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:56,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/83e9218a69174aea9e7102395190fbec is 175, key is test_row_0/A:col10/1732130755425/Put/seqid=0 2024-11-20T19:25:56,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742038_1214 (size=31255) 2024-11-20T19:25:56,269 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/83e9218a69174aea9e7102395190fbec 2024-11-20T19:25:56,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/36fbd765c6c44b47a627fd94f5241965 is 50, key is test_row_0/B:col10/1732130755425/Put/seqid=0 2024-11-20T19:25:56,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742039_1215 (size=12301) 2024-11-20T19:25:56,344 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/36fbd765c6c44b47a627fd94f5241965 2024-11-20T19:25:56,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/14ccb55a753a4bd697c5db6c606a47e2 is 50, key is test_row_0/C:col10/1732130755425/Put/seqid=0 2024-11-20T19:25:56,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130816393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130816394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130816394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130816407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742040_1216 (size=12301) 2024-11-20T19:25:56,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:56,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130816615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,617 DEBUG [Thread-596 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., hostname=db9c3a6c6492,35979,1732130703276, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:25:56,815 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/14ccb55a753a4bd697c5db6c606a47e2 2024-11-20T19:25:56,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/83e9218a69174aea9e7102395190fbec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec 2024-11-20T19:25:56,828 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec, entries=150, sequenceid=364, filesize=30.5 K 2024-11-20T19:25:56,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/36fbd765c6c44b47a627fd94f5241965 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/36fbd765c6c44b47a627fd94f5241965 2024-11-20T19:25:56,835 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/36fbd765c6c44b47a627fd94f5241965, entries=150, sequenceid=364, filesize=12.0 K 2024-11-20T19:25:56,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/14ccb55a753a4bd697c5db6c606a47e2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/14ccb55a753a4bd697c5db6c606a47e2 2024-11-20T19:25:56,846 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/14ccb55a753a4bd697c5db6c606a47e2, entries=150, sequenceid=364, filesize=12.0 K 2024-11-20T19:25:56,851 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): 
Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 77d20aeb75d4c61c8417aef439da7cbf in 1127ms, sequenceid=364, compaction requested=false 2024-11-20T19:25:56,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:56,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:56,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-20T19:25:56,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-20T19:25:56,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-20T19:25:56,854 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5180 sec 2024-11-20T19:25:56,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.5250 sec 2024-11-20T19:25:56,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:25:56,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:56,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:56,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:56,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:56,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:56,923 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208e13654abaef4e1f8b09fe79758564e7_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130755772/Put/seqid=0 2024-11-20T19:25:56,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130816947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742041_1217 (size=14994) 2024-11-20T19:25:56,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130816949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130816950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:56,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:56,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130816953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130817055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130817057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130817057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130817059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130817260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130817261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130817261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130817262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,357 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:57,362 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208e13654abaef4e1f8b09fe79758564e7_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208e13654abaef4e1f8b09fe79758564e7_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:57,363 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1ff875f249bb42e8ba4ab04b9656796b, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:57,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1ff875f249bb42e8ba4ab04b9656796b is 175, key is test_row_0/A:col10/1732130755772/Put/seqid=0 2024-11-20T19:25:57,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742042_1218 (size=39949) 2024-11-20T19:25:57,413 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=378, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1ff875f249bb42e8ba4ab04b9656796b 2024-11-20T19:25:57,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ef7dc5f7944d406d94e39248bf1217bc is 50, key is test_row_0/B:col10/1732130755772/Put/seqid=0 2024-11-20T19:25:57,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742043_1219 
(size=12301) 2024-11-20T19:25:57,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ef7dc5f7944d406d94e39248bf1217bc 2024-11-20T19:25:57,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/520aabc64ae34c2faee4efd00aab91ed is 50, key is test_row_0/C:col10/1732130755772/Put/seqid=0 2024-11-20T19:25:57,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742044_1220 (size=12301) 2024-11-20T19:25:57,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=378 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/520aabc64ae34c2faee4efd00aab91ed 2024-11-20T19:25:57,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1ff875f249bb42e8ba4ab04b9656796b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b 2024-11-20T19:25:57,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b, entries=200, sequenceid=378, filesize=39.0 K 2024-11-20T19:25:57,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ef7dc5f7944d406d94e39248bf1217bc as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ef7dc5f7944d406d94e39248bf1217bc 2024-11-20T19:25:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130817563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130817563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ef7dc5f7944d406d94e39248bf1217bc, entries=150, sequenceid=378, filesize=12.0 K 2024-11-20T19:25:57,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130817565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/520aabc64ae34c2faee4efd00aab91ed as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/520aabc64ae34c2faee4efd00aab91ed 2024-11-20T19:25:57,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:57,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130817566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:57,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/520aabc64ae34c2faee4efd00aab91ed, entries=150, sequenceid=378, filesize=12.0 K 2024-11-20T19:25:57,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 77d20aeb75d4c61c8417aef439da7cbf in 679ms, sequenceid=378, compaction requested=true 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:57,580 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:57,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:25:57,581 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:57,582 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:57,582 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:57,582 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:57,582 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2b484cd0afd940b78d1b270ae69c74a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=100.8 K 2024-11-20T19:25:57,582 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:57,582 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2b484cd0afd940b78d1b270ae69c74a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b] 2024-11-20T19:25:57,583 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:57,583 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:57,583 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:57,583 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e5c8b675f9ac41f08a96970785a55acd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/36fbd765c6c44b47a627fd94f5241965, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ef7dc5f7944d406d94e39248bf1217bc] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.8 K 2024-11-20T19:25:57,587 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5c8b675f9ac41f08a96970785a55acd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732130754231 2024-11-20T19:25:57,587 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b484cd0afd940b78d1b270ae69c74a4, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732130754231 2024-11-20T19:25:57,595 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 83e9218a69174aea9e7102395190fbec, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732130755425 2024-11-20T19:25:57,595 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36fbd765c6c44b47a627fd94f5241965, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732130755425 2024-11-20T19:25:57,597 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef7dc5f7944d406d94e39248bf1217bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732130755772 2024-11-20T19:25:57,597 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ff875f249bb42e8ba4ab04b9656796b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732130755772 2024-11-20T19:25:57,618 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#177 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:57,619 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/2b3529b65bdd4773beb0cff1097585dd is 50, key is test_row_0/B:col10/1732130755772/Put/seqid=0 2024-11-20T19:25:57,636 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:57,643 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120a4a03351faa1411d86762a46adc378ce_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:57,645 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120a4a03351faa1411d86762a46adc378ce_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:57,645 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a4a03351faa1411d86762a46adc378ce_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:57,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742045_1221 (size=13187) 2024-11-20T19:25:57,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742046_1222 (size=4469) 2024-11-20T19:25:57,678 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#178 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:57,679 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/3661253e60224a719e9e308ba42d7684 is 175, key is test_row_0/A:col10/1732130755772/Put/seqid=0 2024-11-20T19:25:57,703 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/2b3529b65bdd4773beb0cff1097585dd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/2b3529b65bdd4773beb0cff1097585dd 2024-11-20T19:25:57,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742047_1223 (size=32141) 2024-11-20T19:25:57,712 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into 2b3529b65bdd4773beb0cff1097585dd(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:57,712 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:57,712 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130757580; duration=0sec 2024-11-20T19:25:57,712 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:57,712 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:57,712 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:57,714 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:57,714 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:57,714 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:57,714 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f4a587672b1a4a8981c217d3e0487761, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/14ccb55a753a4bd697c5db6c606a47e2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/520aabc64ae34c2faee4efd00aab91ed] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.8 K
2024-11-20T19:25:57,715 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4a587672b1a4a8981c217d3e0487761, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1732130754231
2024-11-20T19:25:57,715 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14ccb55a753a4bd697c5db6c606a47e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1732130755425
2024-11-20T19:25:57,715 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 520aabc64ae34c2faee4efd00aab91ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732130755772
2024-11-20T19:25:57,722 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/3661253e60224a719e9e308ba42d7684 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/3661253e60224a719e9e308ba42d7684
2024-11-20T19:25:57,731 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into 3661253e60224a719e9e308ba42d7684(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T19:25:57,731 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf:
2024-11-20T19:25:57,731 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130757580; duration=0sec
2024-11-20T19:25:57,731 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T19:25:57,731 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A
2024-11-20T19:25:57,739 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#179 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-20T19:25:57,739 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/ad1e3656f26b4b229041d016635af15b is 50, key is test_row_0/C:col10/1732130755772/Put/seqid=0
2024-11-20T19:25:57,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742048_1224 (size=13187)
2024-11-20T19:25:57,780 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/ad1e3656f26b4b229041d016635af15b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/ad1e3656f26b4b229041d016635af15b
2024-11-20T19:25:57,792 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into ad1e3656f26b4b229041d016635af15b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T19:25:57,792 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf:
2024-11-20T19:25:57,792 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130757580; duration=0sec
2024-11-20T19:25:57,792 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T19:25:57,792 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C
2024-11-20T19:25:58,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-20T19:25:58,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A
2024-11-20T19:25:58,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:58,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B
2024-11-20T19:25:58,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:58,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C
2024-11-20T19:25:58,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:25:58,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf
2024-11-20T19:25:58,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130818082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130818083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130818085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130818085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202d763547a0dd4c85b6898d8166c6cc30_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130758070/Put/seqid=0 2024-11-20T19:25:58,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742049_1225 (size=14994) 2024-11-20T19:25:58,150 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:58,156 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202d763547a0dd4c85b6898d8166c6cc30_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202d763547a0dd4c85b6898d8166c6cc30_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:58,157 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/758673adfc4b4b83a84a40391300172d, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:58,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/758673adfc4b4b83a84a40391300172d is 175, key is test_row_0/A:col10/1732130758070/Put/seqid=0 2024-11-20T19:25:58,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742050_1226 (size=39949) 2024-11-20T19:25:58,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130818186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130818186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130818188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130818189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130818392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130818392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130818393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130818395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-20T19:25:58,440 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-20T19:25:58,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:25:58,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-20T19:25:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:58,444 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:25:58,446 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:25:58,446 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:25:58,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:58,581 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=407, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/758673adfc4b4b83a84a40391300172d 2024-11-20T19:25:58,598 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,598 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=52 2024-11-20T19:25:58,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:58,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
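The pid=52 failure above, and the master-side report of the same failure immediately below, come from FlushRegionCallable finding the region already mid-flush ("NOT flushing ... as already flushing"): MemStoreFlusher.0 is still writing out the previous snapshot, so the remote procedure reports java.io.IOException and the master re-dispatches it until the flush can proceed. The FLUSH itself is driven by a client Admin call (Client=jenkins flush TestAcidGuarantees, procId 49/51 above). A minimal sketch of issuing such a flush with the standard HBase client API, for orientation only - the connection setup is illustrative and only the table name is taken from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush the table; on the master this surfaces as the
          // FlushTableProcedure / FlushRegionProcedure pids logged above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }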
2024-11-20T19:25:58,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
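The RegionTooBusyException entries that dominate this section ("Over memstore limit=512.0 K") are raised by HRegion.checkResources when a region's memstore grows past its blocking threshold, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; such writes are rejected (and typically retried by the client) until the in-flight flush brings the memstore back under the limit. A small illustrative sketch of that relationship follows - the 128 KB flush size and multiplier of 4 are assumptions chosen only because they reproduce the 512.0 K limit seen in the log, not the test's actual configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration; the shipped defaults are 128 MB and 4.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        // Prints 524288 bytes, i.e. the 512.0 K "Over memstore limit" reported above.
        System.out.println("writes blocked above " + blockingLimit + " bytes");
      }
    }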
2024-11-20T19:25:58,604 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/5969261640fd4e188a9a942405a54a3b is 50, key is test_row_0/B:col10/1732130758070/Put/seqid=0 2024-11-20T19:25:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742051_1227 (size=12301) 2024-11-20T19:25:58,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/5969261640fd4e188a9a942405a54a3b 2024-11-20T19:25:58,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/bbda7f34a970438fb498c647ddab2395 is 50, key is test_row_0/C:col10/1732130758070/Put/seqid=0 2024-11-20T19:25:58,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742052_1228 (size=12301) 2024-11-20T19:25:58,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/bbda7f34a970438fb498c647ddab2395 2024-11-20T19:25:58,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/758673adfc4b4b83a84a40391300172d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d 2024-11-20T19:25:58,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d, entries=200, sequenceid=407, filesize=39.0 K 2024-11-20T19:25:58,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/5969261640fd4e188a9a942405a54a3b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5969261640fd4e188a9a942405a54a3b 2024-11-20T19:25:58,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5969261640fd4e188a9a942405a54a3b, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T19:25:58,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/bbda7f34a970438fb498c647ddab2395 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/bbda7f34a970438fb498c647ddab2395 2024-11-20T19:25:58,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/bbda7f34a970438fb498c647ddab2395, entries=150, sequenceid=407, filesize=12.0 K 2024-11-20T19:25:58,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 77d20aeb75d4c61c8417aef439da7cbf in 626ms, sequenceid=407, compaction requested=false 2024-11-20T19:25:58,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:58,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:58,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:25:58,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:58,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:58,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:58,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:58,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:58,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:58,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c94aba4354fb48d1be9426f04274131c_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:58,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130818737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130818738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:58,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130818744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,751 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:58,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:58,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:58,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:58,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130818753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742053_1229 (size=14994) 2024-11-20T19:25:58,766 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:58,771 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c94aba4354fb48d1be9426f04274131c_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c94aba4354fb48d1be9426f04274131c_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:58,773 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6d0a03b6fbb7435e8571fd657cc9367e, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:58,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6d0a03b6fbb7435e8571fd657cc9367e is 175, key is test_row_0/A:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:58,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742054_1230 (size=39949) 2024-11-20T19:25:58,794 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=421, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6d0a03b6fbb7435e8571fd657cc9367e 2024-11-20T19:25:58,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/b6492d50ed1a4656924af0afdda932e6 is 50, key is 
test_row_0/B:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:58,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742055_1231 (size=12301) 2024-11-20T19:25:58,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/b6492d50ed1a4656924af0afdda932e6 2024-11-20T19:25:58,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/04fa17ae58ab47bebf9e6b296f7a8c98 is 50, key is test_row_0/C:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:58,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130818844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130818846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130818848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130818858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742056_1232 (size=12301) 2024-11-20T19:25:58,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:58,904 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:58,905 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:59,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130819048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130819048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,057 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:59,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,059 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130819052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130819064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,211 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
as already flushing 2024-11-20T19:25:59,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/04fa17ae58ab47bebf9e6b296f7a8c98 2024-11-20T19:25:59,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/6d0a03b6fbb7435e8571fd657cc9367e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e 2024-11-20T19:25:59,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e, entries=200, sequenceid=421, filesize=39.0 K 2024-11-20T19:25:59,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/b6492d50ed1a4656924af0afdda932e6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b6492d50ed1a4656924af0afdda932e6 2024-11-20T19:25:59,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b6492d50ed1a4656924af0afdda932e6, entries=150, sequenceid=421, filesize=12.0 K 2024-11-20T19:25:59,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/04fa17ae58ab47bebf9e6b296f7a8c98 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/04fa17ae58ab47bebf9e6b296f7a8c98 2024-11-20T19:25:59,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/04fa17ae58ab47bebf9e6b296f7a8c98, entries=150, sequenceid=421, filesize=12.0 K 2024-11-20T19:25:59,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 77d20aeb75d4c61c8417aef439da7cbf in 621ms, sequenceid=421, compaction requested=true 2024-11-20T19:25:59,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:59,323 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:25:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:25:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:59,323 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:25:59,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:59,324 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:59,324 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 112039 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:59,324 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/A is initiating minor compaction (all files) 2024-11-20T19:25:59,324 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/B is initiating minor compaction (all files) 2024-11-20T19:25:59,324 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/A in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,324 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/B in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,324 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/2b3529b65bdd4773beb0cff1097585dd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5969261640fd4e188a9a942405a54a3b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b6492d50ed1a4656924af0afdda932e6] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.9 K 2024-11-20T19:25:59,324 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/3661253e60224a719e9e308ba42d7684, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=109.4 K 2024-11-20T19:25:59,324 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,324 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/3661253e60224a719e9e308ba42d7684, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e] 2024-11-20T19:25:59,325 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b3529b65bdd4773beb0cff1097585dd, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732130755772 2024-11-20T19:25:59,325 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5969261640fd4e188a9a942405a54a3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732130756946 2024-11-20T19:25:59,325 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b6492d50ed1a4656924af0afdda932e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1732130758080 2024-11-20T19:25:59,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3661253e60224a719e9e308ba42d7684, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732130755772 2024-11-20T19:25:59,326 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 758673adfc4b4b83a84a40391300172d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732130756946 2024-11-20T19:25:59,326 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d0a03b6fbb7435e8571fd657cc9367e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1732130758080 2024-11-20T19:25:59,341 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#B#compaction#186 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:59,341 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/b11536628f304fd9870ce8a6417d96c0 is 50, key is test_row_0/B:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:59,349 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:59,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:59,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:25:59,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:25:59,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:25:59,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:25:59,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:25:59,365 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:59,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:59,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,367 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112059505541fcac4b3fb3cf48e7ae178c88_77d20aeb75d4c61c8417aef439da7cbf store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:59,369 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112059505541fcac4b3fb3cf48e7ae178c88_77d20aeb75d4c61c8417aef439da7cbf, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:59,369 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112059505541fcac4b3fb3cf48e7ae178c88_77d20aeb75d4c61c8417aef439da7cbf because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:59,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742057_1233 (size=13289) 2024-11-20T19:25:59,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120abc9b952be964d37a73ee5a599f0db78_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130758736/Put/seqid=0 2024-11-20T19:25:59,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742058_1234 (size=4469) 2024-11-20T19:25:59,450 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#A#compaction#187 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:59,451 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/5d8dcb6f1f99479f85d14db533ad69ba is 175, key is test_row_0/A:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:59,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742059_1235 (size=17534) 2024-11-20T19:25:59,465 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:25:59,471 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120abc9b952be964d37a73ee5a599f0db78_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abc9b952be964d37a73ee5a599f0db78_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:25:59,472 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/e48af47e9df64306bd302ca735c3609e, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:25:59,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/e48af47e9df64306bd302ca735c3609e is 175, key is test_row_0/A:col10/1732130758736/Put/seqid=0 2024-11-20T19:25:59,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130819483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130819483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130819483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130819484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742060_1236 (size=32243) 2024-11-20T19:25:59,515 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/5d8dcb6f1f99479f85d14db533ad69ba as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5d8dcb6f1f99479f85d14db533ad69ba 2024-11-20T19:25:59,521 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:59,522 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/A of 77d20aeb75d4c61c8417aef439da7cbf into 5d8dcb6f1f99479f85d14db533ad69ba(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:25:59,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:59,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:59,522 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/A, priority=13, startTime=1732130759322; duration=0sec 2024-11-20T19:25:59,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:25:59,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:25:59,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:59,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:25:59,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,523 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:25:59,524 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 77d20aeb75d4c61c8417aef439da7cbf/C is initiating minor compaction (all files) 2024-11-20T19:25:59,524 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 77d20aeb75d4c61c8417aef439da7cbf/C in TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:25:59,525 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/ad1e3656f26b4b229041d016635af15b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/bbda7f34a970438fb498c647ddab2395, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/04fa17ae58ab47bebf9e6b296f7a8c98] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp, totalSize=36.9 K 2024-11-20T19:25:59,528 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad1e3656f26b4b229041d016635af15b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=378, earliestPutTs=1732130755772 2024-11-20T19:25:59,528 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbda7f34a970438fb498c647ddab2395, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1732130756946 2024-11-20T19:25:59,529 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04fa17ae58ab47bebf9e6b296f7a8c98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1732130758080 2024-11-20T19:25:59,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:25:59,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742061_1237 (size=48639) 2024-11-20T19:25:59,552 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 77d20aeb75d4c61c8417aef439da7cbf#C#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:25:59,553 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/9f6c49fb93ce4d84b605aab9e58b8194 is 50, key is test_row_0/C:col10/1732130758080/Put/seqid=0 2024-11-20T19:25:59,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742062_1238 (size=13289) 2024-11-20T19:25:59,589 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/9f6c49fb93ce4d84b605aab9e58b8194 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/9f6c49fb93ce4d84b605aab9e58b8194 2024-11-20T19:25:59,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130819588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130819589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130819588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130819589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,598 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/C of 77d20aeb75d4c61c8417aef439da7cbf into 9f6c49fb93ce4d84b605aab9e58b8194(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:59,598 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:59,599 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/C, priority=13, startTime=1732130759323; duration=0sec 2024-11-20T19:25:59,599 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:59,599 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:25:59,674 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:59,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:59,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130819794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130819797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130819797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:25:59,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130819798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,807 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/b11536628f304fd9870ce8a6417d96c0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b11536628f304fd9870ce8a6417d96c0 2024-11-20T19:25:59,815 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 77d20aeb75d4c61c8417aef439da7cbf/B of 77d20aeb75d4c61c8417aef439da7cbf into b11536628f304fd9870ce8a6417d96c0(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:25:59,815 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:25:59,815 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf., storeName=77d20aeb75d4c61c8417aef439da7cbf/B, priority=13, startTime=1732130759323; duration=0sec 2024-11-20T19:25:59,815 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:25:59,815 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:25:59,827 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:59,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:59,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,953 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=446, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/e48af47e9df64306bd302ca735c3609e 2024-11-20T19:25:59,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/4350e9de531d440da96b5e1a30793073 is 50, key is test_row_0/B:col10/1732130758736/Put/seqid=0 2024-11-20T19:25:59,981 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:25:59,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:25:59,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:25:59,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:25:59,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:25:59,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:25:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742063_1239 (size=12301) 2024-11-20T19:26:00,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130820099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130820099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130820101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130820106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,134 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:26:00,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:26:00,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:26:00,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:26:00,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:00,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/4350e9de531d440da96b5e1a30793073 2024-11-20T19:26:00,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:26:00,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. as already flushing 2024-11-20T19:26:00,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:00,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:00,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b2ac4f1236754652a0a79d485f3b0926 is 50, key is test_row_0/C:col10/1732130758736/Put/seqid=0 2024-11-20T19:26:00,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742064_1240 (size=12301) 2024-11-20T19:26:00,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b2ac4f1236754652a0a79d485f3b0926 2024-11-20T19:26:00,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/e48af47e9df64306bd302ca735c3609e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/e48af47e9df64306bd302ca735c3609e 2024-11-20T19:26:00,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/e48af47e9df64306bd302ca735c3609e, entries=250, sequenceid=446, filesize=47.5 K 2024-11-20T19:26:00,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/4350e9de531d440da96b5e1a30793073 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4350e9de531d440da96b5e1a30793073 2024-11-20T19:26:00,501 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4350e9de531d440da96b5e1a30793073, entries=150, sequenceid=446, filesize=12.0 K 2024-11-20T19:26:00,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/b2ac4f1236754652a0a79d485f3b0926 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b2ac4f1236754652a0a79d485f3b0926 2024-11-20T19:26:00,508 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b2ac4f1236754652a0a79d485f3b0926, entries=150, sequenceid=446, filesize=12.0 K 2024-11-20T19:26:00,509 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 77d20aeb75d4c61c8417aef439da7cbf in 1153ms, sequenceid=446, 
compaction requested=false 2024-11-20T19:26:00,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:26:00,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:26:00,595 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-20T19:26:00,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:00,595 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:26:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:26:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:26:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:00,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:00,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
as already flushing 2024-11-20T19:26:00,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205a8901cd234046c098876d450a38544d_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130759378/Put/seqid=0 2024-11-20T19:26:00,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742065_1241 (size=12454) 2024-11-20T19:26:00,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130820633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130820633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130820634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130820636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130820637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130820738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130820738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130820739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130820743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130820750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130820942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130820943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130820945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130820951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:00,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:00,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130820951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:01,029 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205a8901cd234046c098876d450a38544d_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205a8901cd234046c098876d450a38544d_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:01,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/322377c6a3414b639b4d2735962fae68, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:26:01,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/322377c6a3414b639b4d2735962fae68 is 175, key is test_row_0/A:col10/1732130759378/Put/seqid=0 2024-11-20T19:26:01,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742066_1242 (size=31255) 2024-11-20T19:26:01,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130821243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130821246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130821248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130821254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130821255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,437 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=460, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/322377c6a3414b639b4d2735962fae68 2024-11-20T19:26:01,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/8e806298895c4d138260f96354c7df09 is 50, key is test_row_0/B:col10/1732130759378/Put/seqid=0 2024-11-20T19:26:01,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742067_1243 (size=12301) 2024-11-20T19:26:01,573 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:26:01,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60158 deadline: 1732130821749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60154 deadline: 1732130821750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60128 deadline: 1732130821753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60184 deadline: 1732130821759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:01,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:60174 deadline: 1732130821760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:01,828 DEBUG [Thread-605 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:50476 2024-11-20T19:26:01,828 DEBUG [Thread-605 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:01,828 DEBUG [Thread-601 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:50476 2024-11-20T19:26:01,828 DEBUG [Thread-601 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:01,828 DEBUG [Thread-603 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:50476 2024-11-20T19:26:01,828 DEBUG [Thread-603 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:01,829 DEBUG [Thread-599 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:50476 2024-11-20T19:26:01,829 DEBUG [Thread-599 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:01,853 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/8e806298895c4d138260f96354c7df09 2024-11-20T19:26:01,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e921e1ea86b74f6b8e0986dc396f9e5b is 50, key is test_row_0/C:col10/1732130759378/Put/seqid=0 2024-11-20T19:26:01,867 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742068_1244 (size=12301) 2024-11-20T19:26:02,268 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e921e1ea86b74f6b8e0986dc396f9e5b 2024-11-20T19:26:02,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/322377c6a3414b639b4d2735962fae68 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/322377c6a3414b639b4d2735962fae68 2024-11-20T19:26:02,277 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/322377c6a3414b639b4d2735962fae68, entries=150, sequenceid=460, filesize=30.5 K 2024-11-20T19:26:02,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/8e806298895c4d138260f96354c7df09 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8e806298895c4d138260f96354c7df09 2024-11-20T19:26:02,282 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8e806298895c4d138260f96354c7df09, entries=150, sequenceid=460, filesize=12.0 K 2024-11-20T19:26:02,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/e921e1ea86b74f6b8e0986dc396f9e5b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e921e1ea86b74f6b8e0986dc396f9e5b 2024-11-20T19:26:02,286 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e921e1ea86b74f6b8e0986dc396f9e5b, entries=150, sequenceid=460, filesize=12.0 K 2024-11-20T19:26:02,287 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, 
heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 77d20aeb75d4c61c8417aef439da7cbf in 1692ms, sequenceid=460, compaction requested=true 2024-11-20T19:26:02,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:26:02,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:02,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-20T19:26:02,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-20T19:26:02,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-20T19:26:02,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.8420 sec 2024-11-20T19:26:02,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.8480 sec 2024-11-20T19:26:02,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-20T19:26:02,549 INFO [Thread-598 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-20T19:26:02,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:02,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:02,761 DEBUG [Thread-592 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:50476 2024-11-20T19:26:02,761 DEBUG [Thread-592 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:26:02,761 DEBUG [Thread-590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7bad2e85 to 127.0.0.1:50476 2024-11-20T19:26:02,761 DEBUG [Thread-590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:26:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:26:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T19:26:02,767 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a0026c4a34e3499193097d5fae27e152_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130760634/Put/seqid=0 2024-11-20T19:26:02,783 DEBUG [Thread-588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7362d978 to 127.0.0.1:50476 2024-11-20T19:26:02,783 DEBUG [Thread-588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,784 DEBUG [Thread-596 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x465dc764 to 127.0.0.1:50476 2024-11-20T19:26:02,784 DEBUG [Thread-596 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,784 DEBUG [Thread-594 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x19a533a3 to 127.0.0.1:50476 2024-11-20T19:26:02,785 DEBUG [Thread-594 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3271 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3107 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1401 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4203 rows 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1395 2024-11-20T19:26:02,785 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4185 rows 2024-11-20T19:26:02,785 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:26:02,785 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04506927 to 127.0.0.1:50476 2024-11-20T19:26:02,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:02,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742069_1245 (size=12454) 2024-11-20T19:26:02,787 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:02,787 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:26:02,789 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:26:02,790 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:02,793 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130762793"}]},"ts":"1732130762793"} 2024-11-20T19:26:02,793 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a0026c4a34e3499193097d5fae27e152_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0026c4a34e3499193097d5fae27e152_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:02,794 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:26:02,794 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1d50dde7fe0b4606bd0661c6d5d96e70, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:26:02,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1d50dde7fe0b4606bd0661c6d5d96e70 is 175, key is test_row_0/A:col10/1732130760634/Put/seqid=0 2024-11-20T19:26:02,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742070_1246 (size=31255) 2024-11-20T19:26:02,818 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:26:02,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:26:02,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=55, ppid=54, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, UNASSIGN}] 2024-11-20T19:26:02,820 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=55, ppid=54, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, UNASSIGN 2024-11-20T19:26:02,821 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=55 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:02,822 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:26:02,822 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; CloseRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:26:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:02,973 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:02,974 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(124): Close 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:02,974 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:26:02,975 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1681): Closing 77d20aeb75d4c61c8417aef439da7cbf, disabling compactions & flushes 2024-11-20T19:26:02,975 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:03,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:03,199 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=487, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1d50dde7fe0b4606bd0661c6d5d96e70 2024-11-20T19:26:03,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/573200a38fe84a0c8b6fe985c7fb97a6 is 50, key is test_row_0/B:col10/1732130760634/Put/seqid=0 2024-11-20T19:26:03,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742071_1247 (size=12301) 2024-11-20T19:26:03,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:03,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/573200a38fe84a0c8b6fe985c7fb97a6 2024-11-20T19:26:03,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/03a82c6d18d94605964a2cc0a1db370f is 50, key is test_row_0/C:col10/1732130760634/Put/seqid=0 2024-11-20T19:26:03,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742072_1248 (size=12301) 2024-11-20T19:26:03,898 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:04,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/03a82c6d18d94605964a2cc0a1db370f 2024-11-20T19:26:04,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/1d50dde7fe0b4606bd0661c6d5d96e70 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1d50dde7fe0b4606bd0661c6d5d96e70 2024-11-20T19:26:04,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1d50dde7fe0b4606bd0661c6d5d96e70, entries=150, sequenceid=487, filesize=30.5 K 2024-11-20T19:26:04,048 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/573200a38fe84a0c8b6fe985c7fb97a6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/573200a38fe84a0c8b6fe985c7fb97a6 2024-11-20T19:26:04,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/573200a38fe84a0c8b6fe985c7fb97a6, entries=150, sequenceid=487, filesize=12.0 K 2024-11-20T19:26:04,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/03a82c6d18d94605964a2cc0a1db370f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/03a82c6d18d94605964a2cc0a1db370f 2024-11-20T19:26:04,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/03a82c6d18d94605964a2cc0a1db370f, entries=150, sequenceid=487, filesize=12.0 K 2024-11-20T19:26:04,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=20.13 KB/20610 for 77d20aeb75d4c61c8417aef439da7cbf in 1299ms, sequenceid=487, compaction requested=true 2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:26:04,060 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:04,060 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. because compaction request was cancelled 2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:04,060 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:04,060 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:A 2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:04,060 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. after waiting 0 ms 2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:04,060 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. because compaction request was cancelled 2024-11-20T19:26:04,060 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 77d20aeb75d4c61c8417aef439da7cbf:C, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:04,060 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:B 2024-11-20T19:26:04,060 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 
because compaction request was cancelled 2024-11-20T19:26:04,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:04,060 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 77d20aeb75d4c61c8417aef439da7cbf:C 2024-11-20T19:26:04,060 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(2837): Flushing 77d20aeb75d4c61c8417aef439da7cbf 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-20T19:26:04,060 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=A 2024-11-20T19:26:04,061 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:04,061 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=B 2024-11-20T19:26:04,061 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:04,061 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 77d20aeb75d4c61c8417aef439da7cbf, store=C 2024-11-20T19:26:04,061 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:04,066 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a2973200a1ff4f7bb42f89e56db49edb_77d20aeb75d4c61c8417aef439da7cbf is 50, key is test_row_0/A:col10/1732130762784/Put/seqid=0 2024-11-20T19:26:04,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742073_1249 (size=9914) 2024-11-20T19:26:04,471 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:04,480 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a2973200a1ff4f7bb42f89e56db49edb_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2973200a1ff4f7bb42f89e56db49edb_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:04,481 DEBUG 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/5949f865ab814d4cadc7952ae81ebe70, store: [table=TestAcidGuarantees family=A region=77d20aeb75d4c61c8417aef439da7cbf] 2024-11-20T19:26:04,482 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/5949f865ab814d4cadc7952ae81ebe70 is 175, key is test_row_0/A:col10/1732130762784/Put/seqid=0 2024-11-20T19:26:04,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742074_1250 (size=22561) 2024-11-20T19:26:04,888 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=493, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/5949f865ab814d4cadc7952ae81ebe70 2024-11-20T19:26:04,895 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ed40aa84825b4e388378bba819bf6279 is 50, key is test_row_0/B:col10/1732130762784/Put/seqid=0 2024-11-20T19:26:04,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742075_1251 (size=9857) 2024-11-20T19:26:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:05,302 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=493 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ed40aa84825b4e388378bba819bf6279 2024-11-20T19:26:05,314 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/d15c966240c04ec69c3c9a5bbd7f4173 is 50, key is test_row_0/C:col10/1732130762784/Put/seqid=0 2024-11-20T19:26:05,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742076_1252 (size=9857) 2024-11-20T19:26:05,720 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=493 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/d15c966240c04ec69c3c9a5bbd7f4173 2024-11-20T19:26:05,729 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/A/5949f865ab814d4cadc7952ae81ebe70 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5949f865ab814d4cadc7952ae81ebe70 2024-11-20T19:26:05,733 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5949f865ab814d4cadc7952ae81ebe70, entries=100, sequenceid=493, filesize=22.0 K 2024-11-20T19:26:05,734 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/B/ed40aa84825b4e388378bba819bf6279 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ed40aa84825b4e388378bba819bf6279 2024-11-20T19:26:05,739 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ed40aa84825b4e388378bba819bf6279, entries=100, sequenceid=493, filesize=9.6 K 2024-11-20T19:26:05,740 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/.tmp/C/d15c966240c04ec69c3c9a5bbd7f4173 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/d15c966240c04ec69c3c9a5bbd7f4173 2024-11-20T19:26:05,744 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/d15c966240c04ec69c3c9a5bbd7f4173, entries=100, sequenceid=493, filesize=9.6 K 2024-11-20T19:26:05,745 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 77d20aeb75d4c61c8417aef439da7cbf in 1685ms, sequenceid=493, compaction requested=true 2024-11-20T19:26:05,746 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/084f0550723e462ab016d7cb1a5aa0a0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/eebe6f49b5144d0cb8c8caa685ae23e1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2ef22921e9644e1dbfc6b43123f78e98, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/4ad7e95a95634e12a374cf7f4fe975c6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/ae3239cfa04c435aa7190cebda3002ab, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/faef370e69dc428eac9a0a5ba1245706, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2b484cd0afd940b78d1b270ae69c74a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/3661253e60224a719e9e308ba42d7684, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e] to archive 2024-11-20T19:26:05,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:26:05,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7cfb627e481d400684668d96f4847444 2024-11-20T19:26:05,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/f2221adc63dd4633b32dd28e36beb848 2024-11-20T19:26:05,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6f18b099c6424c5dae4f2efb19051c1e 2024-11-20T19:26:05,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/084f0550723e462ab016d7cb1a5aa0a0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/084f0550723e462ab016d7cb1a5aa0a0 2024-11-20T19:26:05,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/55ddb1f378a6451fabd1191b5b7a05b0 2024-11-20T19:26:05,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2648aa553dcd4707bce642d3b7ce4e86 2024-11-20T19:26:05,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/eebe6f49b5144d0cb8c8caa685ae23e1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/eebe6f49b5144d0cb8c8caa685ae23e1 2024-11-20T19:26:05,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d14125ddda1040fa95e21dc6e6442a64 2024-11-20T19:26:05,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/12fd047c5f4c465dbfb1a4cad9c308d4 2024-11-20T19:26:05,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/dc2018e14ff741269aee7a961a5ff050 2024-11-20T19:26:05,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2ef22921e9644e1dbfc6b43123f78e98 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2ef22921e9644e1dbfc6b43123f78e98 2024-11-20T19:26:05,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/60b3e79ab452429587f4cee84e8a7cdf 2024-11-20T19:26:05,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/4ad7e95a95634e12a374cf7f4fe975c6 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/4ad7e95a95634e12a374cf7f4fe975c6 2024-11-20T19:26:05,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/d93bc65c53d7436e9eefa99dfb69b6fb 2024-11-20T19:26:05,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/40f35ebe77ee42f2852d3d64cb898707 2024-11-20T19:26:05,776 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/ae3239cfa04c435aa7190cebda3002ab to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/ae3239cfa04c435aa7190cebda3002ab 2024-11-20T19:26:05,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7d4e130a9079438190577153ae72419d 2024-11-20T19:26:05,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/7561d720f75949619edb9dede270c8c1 2024-11-20T19:26:05,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/933660c772684a33af3725316ccdec2d 2024-11-20T19:26:05,783 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/a723ea11683e43a5a3806ed5e479f4da 2024-11-20T19:26:05,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/faef370e69dc428eac9a0a5ba1245706 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/faef370e69dc428eac9a0a5ba1245706 2024-11-20T19:26:05,785 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/aa4ce61b89fe4fd6aeaf596263df1318 2024-11-20T19:26:05,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/05804593f89847bf8090619e98606d23 2024-11-20T19:26:05,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2b484cd0afd940b78d1b270ae69c74a4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/2b484cd0afd940b78d1b270ae69c74a4 2024-11-20T19:26:05,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/83e9218a69174aea9e7102395190fbec 2024-11-20T19:26:05,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1ff875f249bb42e8ba4ab04b9656796b 2024-11-20T19:26:05,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/3661253e60224a719e9e308ba42d7684 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/3661253e60224a719e9e308ba42d7684 2024-11-20T19:26:05,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/758673adfc4b4b83a84a40391300172d 2024-11-20T19:26:05,792 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/6d0a03b6fbb7435e8571fd657cc9367e 2024-11-20T19:26:05,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/faad00aa7fd3495497cd5cf53189af8e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aa5e109e38324612b6ff714774b3cc22, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/363dc233dcd74010b54cacdcac3c7d9b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5ce8f28dd4124264bad7b8e10615f383, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/787cee67c14a4ba2b0af9b7a1a49cd24, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aadb8b89094144099e064bb81abb1241, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/77772554cda24d82beb6dca6dd2b4d37, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/87d90c8ac68241fcb426f156136d33be, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/d95b52449de34698a7ad0394525c8bc0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4a49c332ee1740ff925ccbc2246eaf4a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/6ca2956fd69140e2aebdf51edefabcd1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/70ca66ec0c8f43b487a703bc65278c1c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/1abf032d111a4135bfb166cd927358ad, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/3eff0e3944d4415aa0afe18d3847d05c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/64244eea71ee4fa98048da0ef71fdb09, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/12df09f6a2b14ebfb22064592597eb99, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/f8e49ec9144f4f908113698460633165, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/9da041f2653e4f14839f833ccfdf8b9d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8c5d74fa59bc45ce94af973e878a8a5e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/7bc1beda36f54f988f617b7d0e10a3c9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ea5c788f75104f099067d9ce58450ed1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e5c8b675f9ac41f08a96970785a55acd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/fd351726dad042d3a6dd4d6271369ec4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/36fbd765c6c44b47a627fd94f5241965, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/2b3529b65bdd4773beb0cff1097585dd, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ef7dc5f7944d406d94e39248bf1217bc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5969261640fd4e188a9a942405a54a3b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b6492d50ed1a4656924af0afdda932e6] to archive 2024-11-20T19:26:05,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:26:05,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/faad00aa7fd3495497cd5cf53189af8e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/faad00aa7fd3495497cd5cf53189af8e 2024-11-20T19:26:05,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aa5e109e38324612b6ff714774b3cc22 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aa5e109e38324612b6ff714774b3cc22 2024-11-20T19:26:05,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/363dc233dcd74010b54cacdcac3c7d9b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/363dc233dcd74010b54cacdcac3c7d9b 2024-11-20T19:26:05,798 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e8f7bb9f7aaf4e808c6aa2e9f5483cd7 2024-11-20T19:26:05,799 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5ce8f28dd4124264bad7b8e10615f383 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5ce8f28dd4124264bad7b8e10615f383 2024-11-20T19:26:05,800 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/787cee67c14a4ba2b0af9b7a1a49cd24 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/787cee67c14a4ba2b0af9b7a1a49cd24 2024-11-20T19:26:05,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aadb8b89094144099e064bb81abb1241 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/aadb8b89094144099e064bb81abb1241 2024-11-20T19:26:05,802 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/77772554cda24d82beb6dca6dd2b4d37 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/77772554cda24d82beb6dca6dd2b4d37 2024-11-20T19:26:05,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/87d90c8ac68241fcb426f156136d33be to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/87d90c8ac68241fcb426f156136d33be 2024-11-20T19:26:05,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/d95b52449de34698a7ad0394525c8bc0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/d95b52449de34698a7ad0394525c8bc0 2024-11-20T19:26:05,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4a49c332ee1740ff925ccbc2246eaf4a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4a49c332ee1740ff925ccbc2246eaf4a 2024-11-20T19:26:05,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/6ca2956fd69140e2aebdf51edefabcd1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/6ca2956fd69140e2aebdf51edefabcd1 2024-11-20T19:26:05,806 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/70ca66ec0c8f43b487a703bc65278c1c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/70ca66ec0c8f43b487a703bc65278c1c 2024-11-20T19:26:05,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/1abf032d111a4135bfb166cd927358ad to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/1abf032d111a4135bfb166cd927358ad 2024-11-20T19:26:05,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/3eff0e3944d4415aa0afe18d3847d05c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/3eff0e3944d4415aa0afe18d3847d05c 2024-11-20T19:26:05,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/64244eea71ee4fa98048da0ef71fdb09 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/64244eea71ee4fa98048da0ef71fdb09 2024-11-20T19:26:05,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/12df09f6a2b14ebfb22064592597eb99 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/12df09f6a2b14ebfb22064592597eb99 2024-11-20T19:26:05,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/f8e49ec9144f4f908113698460633165 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/f8e49ec9144f4f908113698460633165 2024-11-20T19:26:05,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/9da041f2653e4f14839f833ccfdf8b9d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/9da041f2653e4f14839f833ccfdf8b9d 2024-11-20T19:26:05,812 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8c5d74fa59bc45ce94af973e878a8a5e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8c5d74fa59bc45ce94af973e878a8a5e 2024-11-20T19:26:05,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/7bc1beda36f54f988f617b7d0e10a3c9 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/7bc1beda36f54f988f617b7d0e10a3c9 2024-11-20T19:26:05,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ea5c788f75104f099067d9ce58450ed1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ea5c788f75104f099067d9ce58450ed1 2024-11-20T19:26:05,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e5c8b675f9ac41f08a96970785a55acd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/e5c8b675f9ac41f08a96970785a55acd 2024-11-20T19:26:05,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/fd351726dad042d3a6dd4d6271369ec4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/fd351726dad042d3a6dd4d6271369ec4 2024-11-20T19:26:05,816 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/36fbd765c6c44b47a627fd94f5241965 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/36fbd765c6c44b47a627fd94f5241965 2024-11-20T19:26:05,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/2b3529b65bdd4773beb0cff1097585dd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/2b3529b65bdd4773beb0cff1097585dd 2024-11-20T19:26:05,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ef7dc5f7944d406d94e39248bf1217bc to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ef7dc5f7944d406d94e39248bf1217bc 2024-11-20T19:26:05,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5969261640fd4e188a9a942405a54a3b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/5969261640fd4e188a9a942405a54a3b 2024-11-20T19:26:05,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b6492d50ed1a4656924af0afdda932e6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b6492d50ed1a4656924af0afdda932e6 2024-11-20T19:26:05,820 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/edbee67de31446e5a03fd8cbb2c7aad7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b5ae243079d9451e849d22236aab9f38, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/2df2a93ba3794462b80e8998feb78254, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1cf83c47679146d891f1656b4058cc26, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/df69777038eb495f94270a1713ad36e3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e1bf5517961b4cd7add97897780f4b8e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/64220ae1816d4a2385ee996524f2899b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/736622f00f89484a9e23913911f0db1f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/818a4407e8c847659382f06069e89e77, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/76c3bb2ef2fc4e60ab01485c73f49f69, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/5b35d29d1ce54e8faa979949a09659d2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4b285f7f84dc47e1b166a51465d800ba, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4296083ef8b34351b4b8ee9ebfa9ebeb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/85396a2be66f44dab5f45d2d9066f063, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1ce09834e8964ceabfd1921f04d912fa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/787a550e318e4c4d94ac97bf2d92b9da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f021b3338c544e809d06d4f7a5bfa94d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b1cec8baa74b4bd8a2723b0b57b67f2b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b400276b84334ecfa3ae734c01dff52f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f3768fc1503c4a389c68f2ddbcb5e04f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e19299755d324f8b895aee525f80ac51, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1e4bd6892ed340748a7af39818b68370, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f4a587672b1a4a8981c217d3e0487761, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4adb29e876a046ccb8f5e16cf0a721a0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/14ccb55a753a4bd697c5db6c606a47e2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/ad1e3656f26b4b229041d016635af15b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/520aabc64ae34c2faee4efd00aab91ed, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/bbda7f34a970438fb498c647ddab2395, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/04fa17ae58ab47bebf9e6b296f7a8c98] to archive 2024-11-20T19:26:05,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:26:05,822 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/edbee67de31446e5a03fd8cbb2c7aad7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/edbee67de31446e5a03fd8cbb2c7aad7 2024-11-20T19:26:05,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b5ae243079d9451e849d22236aab9f38 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b5ae243079d9451e849d22236aab9f38 2024-11-20T19:26:05,823 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/2df2a93ba3794462b80e8998feb78254 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/2df2a93ba3794462b80e8998feb78254 2024-11-20T19:26:05,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1cf83c47679146d891f1656b4058cc26 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1cf83c47679146d891f1656b4058cc26 2024-11-20T19:26:05,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/df69777038eb495f94270a1713ad36e3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/df69777038eb495f94270a1713ad36e3 2024-11-20T19:26:05,826 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e1bf5517961b4cd7add97897780f4b8e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e1bf5517961b4cd7add97897780f4b8e 2024-11-20T19:26:05,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/64220ae1816d4a2385ee996524f2899b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/64220ae1816d4a2385ee996524f2899b 2024-11-20T19:26:05,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/736622f00f89484a9e23913911f0db1f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/736622f00f89484a9e23913911f0db1f 2024-11-20T19:26:05,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/818a4407e8c847659382f06069e89e77 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/818a4407e8c847659382f06069e89e77 2024-11-20T19:26:05,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/76c3bb2ef2fc4e60ab01485c73f49f69 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/76c3bb2ef2fc4e60ab01485c73f49f69 2024-11-20T19:26:05,829 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/5b35d29d1ce54e8faa979949a09659d2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/5b35d29d1ce54e8faa979949a09659d2 2024-11-20T19:26:05,830 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4b285f7f84dc47e1b166a51465d800ba to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4b285f7f84dc47e1b166a51465d800ba 2024-11-20T19:26:05,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4296083ef8b34351b4b8ee9ebfa9ebeb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4296083ef8b34351b4b8ee9ebfa9ebeb 2024-11-20T19:26:05,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/85396a2be66f44dab5f45d2d9066f063 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/85396a2be66f44dab5f45d2d9066f063 2024-11-20T19:26:05,832 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1ce09834e8964ceabfd1921f04d912fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1ce09834e8964ceabfd1921f04d912fa 2024-11-20T19:26:05,833 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/787a550e318e4c4d94ac97bf2d92b9da to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/787a550e318e4c4d94ac97bf2d92b9da 2024-11-20T19:26:05,834 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f021b3338c544e809d06d4f7a5bfa94d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f021b3338c544e809d06d4f7a5bfa94d 2024-11-20T19:26:05,835 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b1cec8baa74b4bd8a2723b0b57b67f2b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b1cec8baa74b4bd8a2723b0b57b67f2b 2024-11-20T19:26:05,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b400276b84334ecfa3ae734c01dff52f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b400276b84334ecfa3ae734c01dff52f 2024-11-20T19:26:05,836 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f3768fc1503c4a389c68f2ddbcb5e04f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f3768fc1503c4a389c68f2ddbcb5e04f 2024-11-20T19:26:05,837 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e19299755d324f8b895aee525f80ac51 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e19299755d324f8b895aee525f80ac51 2024-11-20T19:26:05,838 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1e4bd6892ed340748a7af39818b68370 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/1e4bd6892ed340748a7af39818b68370 2024-11-20T19:26:05,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f4a587672b1a4a8981c217d3e0487761 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/f4a587672b1a4a8981c217d3e0487761 2024-11-20T19:26:05,839 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4adb29e876a046ccb8f5e16cf0a721a0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/4adb29e876a046ccb8f5e16cf0a721a0 2024-11-20T19:26:05,840 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/14ccb55a753a4bd697c5db6c606a47e2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/14ccb55a753a4bd697c5db6c606a47e2 2024-11-20T19:26:05,841 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/ad1e3656f26b4b229041d016635af15b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/ad1e3656f26b4b229041d016635af15b 2024-11-20T19:26:05,842 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/520aabc64ae34c2faee4efd00aab91ed to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/520aabc64ae34c2faee4efd00aab91ed 2024-11-20T19:26:05,843 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/bbda7f34a970438fb498c647ddab2395 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/bbda7f34a970438fb498c647ddab2395 2024-11-20T19:26:05,844 DEBUG [StoreCloser-TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/04fa17ae58ab47bebf9e6b296f7a8c98 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/04fa17ae58ab47bebf9e6b296f7a8c98 2024-11-20T19:26:05,847 DEBUG 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/recovered.edits/496.seqid, newMaxSeqId=496, maxSeqId=4 2024-11-20T19:26:05,848 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf. 2024-11-20T19:26:05,848 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1635): Region close journal for 77d20aeb75d4c61c8417aef439da7cbf: 2024-11-20T19:26:05,849 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(170): Closed 77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:05,849 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=55 updating hbase:meta row=77d20aeb75d4c61c8417aef439da7cbf, regionState=CLOSED 2024-11-20T19:26:05,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-20T19:26:05,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; CloseRegionProcedure 77d20aeb75d4c61c8417aef439da7cbf, server=db9c3a6c6492,35979,1732130703276 in 3.0280 sec 2024-11-20T19:26:05,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=55, resume processing ppid=54 2024-11-20T19:26:05,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, ppid=54, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=77d20aeb75d4c61c8417aef439da7cbf, UNASSIGN in 3.0310 sec 2024-11-20T19:26:05,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-20T19:26:05,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 3.0330 sec 2024-11-20T19:26:05,853 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130765853"}]},"ts":"1732130765853"} 2024-11-20T19:26:05,854 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:26:05,985 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:26:05,988 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 3.1970 sec 2024-11-20T19:26:06,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-20T19:26:06,902 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-20T19:26:06,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:26:06,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; 
DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:06,910 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:06,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:26:06,911 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:06,913 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,917 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/recovered.edits] 2024-11-20T19:26:06,922 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1d50dde7fe0b4606bd0661c6d5d96e70 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/1d50dde7fe0b4606bd0661c6d5d96e70 2024-11-20T19:26:06,925 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/322377c6a3414b639b4d2735962fae68 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/322377c6a3414b639b4d2735962fae68 2024-11-20T19:26:06,928 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5949f865ab814d4cadc7952ae81ebe70 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5949f865ab814d4cadc7952ae81ebe70 2024-11-20T19:26:06,930 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5d8dcb6f1f99479f85d14db533ad69ba to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/5d8dcb6f1f99479f85d14db533ad69ba 2024-11-20T19:26:06,933 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/e48af47e9df64306bd302ca735c3609e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/A/e48af47e9df64306bd302ca735c3609e 2024-11-20T19:26:06,938 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4350e9de531d440da96b5e1a30793073 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/4350e9de531d440da96b5e1a30793073 2024-11-20T19:26:06,941 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/573200a38fe84a0c8b6fe985c7fb97a6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/573200a38fe84a0c8b6fe985c7fb97a6 2024-11-20T19:26:06,943 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8e806298895c4d138260f96354c7df09 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/8e806298895c4d138260f96354c7df09 2024-11-20T19:26:06,946 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b11536628f304fd9870ce8a6417d96c0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/b11536628f304fd9870ce8a6417d96c0 2024-11-20T19:26:06,947 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ed40aa84825b4e388378bba819bf6279 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/B/ed40aa84825b4e388378bba819bf6279 2024-11-20T19:26:06,950 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/03a82c6d18d94605964a2cc0a1db370f to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/03a82c6d18d94605964a2cc0a1db370f 2024-11-20T19:26:06,951 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/9f6c49fb93ce4d84b605aab9e58b8194 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/9f6c49fb93ce4d84b605aab9e58b8194 2024-11-20T19:26:06,953 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b2ac4f1236754652a0a79d485f3b0926 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/b2ac4f1236754652a0a79d485f3b0926 2024-11-20T19:26:06,954 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/d15c966240c04ec69c3c9a5bbd7f4173 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/d15c966240c04ec69c3c9a5bbd7f4173 2024-11-20T19:26:06,955 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e921e1ea86b74f6b8e0986dc396f9e5b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/C/e921e1ea86b74f6b8e0986dc396f9e5b 2024-11-20T19:26:06,957 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/recovered.edits/496.seqid to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf/recovered.edits/496.seqid 2024-11-20T19:26:06,958 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,958 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:26:06,958 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:26:06,959 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T19:26:06,963 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b320ea72ac148aaa5e83afb44f25fe6_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b320ea72ac148aaa5e83afb44f25fe6_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,964 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b8c235f63c4469ba77913c5497050fd_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200b8c235f63c4469ba77913c5497050fd_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,965 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016554ed1f210488d80dd94a15ddc9ad9_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112016554ed1f210488d80dd94a15ddc9ad9_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,965 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c5f4185d634424e90bf60f9e58241e3_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411201c5f4185d634424e90bf60f9e58241e3_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,966 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202d763547a0dd4c85b6898d8166c6cc30_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202d763547a0dd4c85b6898d8166c6cc30_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,967 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112040b13c7e3910411ba26de4b97f52eead_77d20aeb75d4c61c8417aef439da7cbf to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112040b13c7e3910411ba26de4b97f52eead_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,968 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204fa80d4392f6492e8bc501a96d837230_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204fa80d4392f6492e8bc501a96d837230_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,969 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120539b4fb865dd4533ac72e2d94d89775b_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120539b4fb865dd4533ac72e2d94d89775b_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,970 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205a8901cd234046c098876d450a38544d_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205a8901cd234046c098876d450a38544d_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,971 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112076c0c63ec84b4385bf5fa8c6cc91e951_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112076c0c63ec84b4385bf5fa8c6cc91e951_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,972 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207cc3d712328c4759a030a44d34a03b2e_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207cc3d712328c4759a030a44d34a03b2e_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,973 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208e13654abaef4e1f8b09fe79758564e7_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411208e13654abaef4e1f8b09fe79758564e7_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,974 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112096a7ba5a9e9c442f913e4678a5468322_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112096a7ba5a9e9c442f913e4678a5468322_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,975 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0026c4a34e3499193097d5fae27e152_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a0026c4a34e3499193097d5fae27e152_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,976 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a090710bd346452b944e359ec3304091_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a090710bd346452b944e359ec3304091_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,977 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2973200a1ff4f7bb42f89e56db49edb_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a2973200a1ff4f7bb42f89e56db49edb_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,978 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abbc7e03755048f8adb9f4fa710a28c9_77d20aeb75d4c61c8417aef439da7cbf to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abbc7e03755048f8adb9f4fa710a28c9_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,979 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abc9b952be964d37a73ee5a599f0db78_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120abc9b952be964d37a73ee5a599f0db78_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,980 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b6af8b78e1e7424d91d513d739fa03cf_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b6af8b78e1e7424d91d513d739fa03cf_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,981 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c318ddc2b54c44f3b0572e02283e6c13_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c318ddc2b54c44f3b0572e02283e6c13_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,982 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c76625438472482d9787130cc3148c94_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c76625438472482d9787130cc3148c94_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,983 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c94aba4354fb48d1be9426f04274131c_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c94aba4354fb48d1be9426f04274131c_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,984 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dcb85625a1e646edb87101dcc7c6f9d3_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dcb85625a1e646edb87101dcc7c6f9d3_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,985 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ea730ebcfe7f4926a9296a97621f2ee5_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ea730ebcfe7f4926a9296a97621f2ee5_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,985 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fa2360a2f41e4461bbbb1a4e9e9169fe_77d20aeb75d4c61c8417aef439da7cbf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fa2360a2f41e4461bbbb1a4e9e9169fe_77d20aeb75d4c61c8417aef439da7cbf 2024-11-20T19:26:06,986 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:26:06,988 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:06,990 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:26:06,992 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:26:06,993 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:06,993 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-20T19:26:06,993 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130766993"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:06,995 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:26:06,995 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 77d20aeb75d4c61c8417aef439da7cbf, NAME => 'TestAcidGuarantees,,1732130738266.77d20aeb75d4c61c8417aef439da7cbf.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:26:06,995 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T19:26:06,995 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130766995"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:06,997 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:26:07,010 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:07,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:26:07,011 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 106 msec 2024-11-20T19:26:07,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-20T19:26:07,213 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-20T19:26:07,227 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=241 (was 238) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-907484729_22 at /127.0.0.1:46660 [Waiting for operation #196] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-907484729_22 at /127.0.0.1:42098 [Waiting for operation #287] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1007737131_22 at /127.0.0.1:35666 [Waiting for operation #320] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x571b1d6e-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=460 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=678 (was 543) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3487 (was 3715) 2024-11-20T19:26:07,236 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=241, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=678, ProcessCount=11, AvailableMemoryMB=3487 2024-11-20T19:26:07,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T19:26:07,238 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:26:07,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:07,240 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:26:07,240 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:07,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 58 2024-11-20T19:26:07,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=58 2024-11-20T19:26:07,241 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:26:07,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742077_1253 (size=963) 2024-11-20T19:26:07,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=58 2024-11-20T19:26:07,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=58 2024-11-20T19:26:07,653 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:26:07,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742078_1254 (size=53) 2024-11-20T19:26:07,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=58 2024-11-20T19:26:08,068 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:08,068 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 05d8fd611fc9337dfa63e932920aeaaa, disabling compactions & flushes 2024-11-20T19:26:08,069 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:08,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:08,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. after waiting 0 ms 2024-11-20T19:26:08,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:08,069 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:08,069 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:08,071 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:26:08,072 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130768072"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130768072"}]},"ts":"1732130768072"} 2024-11-20T19:26:08,074 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:26:08,076 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:26:08,076 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130768076"}]},"ts":"1732130768076"} 2024-11-20T19:26:08,078 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:26:08,135 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, ASSIGN}] 2024-11-20T19:26:08,138 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, ASSIGN 2024-11-20T19:26:08,140 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:26:08,191 DEBUG [master/db9c3a6c6492:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region d21e0da06747a4b3da8e29803090bc10 changed from -1.0 to 0.0, refreshing cache 2024-11-20T19:26:08,291 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=05d8fd611fc9337dfa63e932920aeaaa, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:08,293 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; OpenRegionProcedure 05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:26:08,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=58 2024-11-20T19:26:08,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:08,450 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:08,450 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(7285): Opening region: {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:26:08,451 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,451 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:08,451 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(7327): checking encryption for 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,451 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(7330): checking classloading for 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,453 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,455 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:08,455 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05d8fd611fc9337dfa63e932920aeaaa columnFamilyName A 2024-11-20T19:26:08,455 DEBUG [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:08,456 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.HStore(327): Store=05d8fd611fc9337dfa63e932920aeaaa/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:08,456 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,457 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:08,457 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05d8fd611fc9337dfa63e932920aeaaa columnFamilyName B 2024-11-20T19:26:08,458 DEBUG [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:08,458 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.HStore(327): Store=05d8fd611fc9337dfa63e932920aeaaa/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:08,458 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,460 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:08,460 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05d8fd611fc9337dfa63e932920aeaaa columnFamilyName C 2024-11-20T19:26:08,460 DEBUG [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:08,460 INFO [StoreOpener-05d8fd611fc9337dfa63e932920aeaaa-1 {}] regionserver.HStore(327): 
Store=05d8fd611fc9337dfa63e932920aeaaa/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:08,461 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:08,462 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,462 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,464 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:26:08,466 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(1085): writing seq id for 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:08,468 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:26:08,469 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(1102): Opened 05d8fd611fc9337dfa63e932920aeaaa; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60731497, jitterRate=-0.09503017365932465}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:26:08,470 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegion(1001): Region open journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:08,471 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., pid=60, masterSystemTime=1732130768446 2024-11-20T19:26:08,472 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:08,472 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=60}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
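The store-opener records above show each column family (A, B, C) of TestAcidGuarantees being backed by a CompactingMemStore with an ADAPTIVE in-memory compactor. As a minimal sketch of how such a table could be declared through the public Admin API, assuming a reachable cluster and using only the table and family names taken from the log (this is illustrative, not the test's actual setup code):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // ADAPTIVE matches the "compactor=ADAPTIVE" reported by CompactingMemStore above
                .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                .build());
      }
      // Submitting the descriptor is what drives a CreateTableProcedure like pid=58 in this log.
      admin.createTable(table.build());
    }
  }
}
```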
2024-11-20T19:26:08,473 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=05d8fd611fc9337dfa63e932920aeaaa, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:08,475 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-20T19:26:08,476 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; OpenRegionProcedure 05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 in 181 msec 2024-11-20T19:26:08,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=58 2024-11-20T19:26:08,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=58, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, ASSIGN in 341 msec 2024-11-20T19:26:08,478 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:26:08,478 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130768478"}]},"ts":"1732130768478"} 2024-11-20T19:26:08,479 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:26:08,494 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:26:08,496 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2560 sec 2024-11-20T19:26:09,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=58 2024-11-20T19:26:09,351 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 58 completed 2024-11-20T19:26:09,358 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64dc42d9 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58341641 2024-11-20T19:26:09,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17b6adc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,411 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,412 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51508, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,413 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:26:09,414 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52130, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:26:09,416 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1ac389 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@44645c55 2024-11-20T19:26:09,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@669e1999, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,428 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x028e73c0 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64ee0130 2024-11-20T19:26:09,435 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72aa9ee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,437 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-11-20T19:26:09,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-11-20T19:26:09,452 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,453 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-11-20T19:26:09,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,462 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14ed1e44 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78b04266 2024-11-20T19:26:09,469 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5886c0f2, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,470 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72537a47 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@88aa519 2024-11-20T19:26:09,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e575aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,478 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x036642cb to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e998dd3 2024-11-20T19:26:09,485 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@131ceb8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,486 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-11-20T19:26:09,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,495 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-11-20T19:26:09,502 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:09,505 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:09,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-20T19:26:09,506 DEBUG [hconnection-0x3e228f6d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,506 DEBUG [hconnection-0x7b7d4f2e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,507 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:09,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:09,507 DEBUG [hconnection-0x4b90a49e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,507 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:09,508 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:09,508 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51514, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,508 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,508 DEBUG [hconnection-0x653e5da3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,508 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51532, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,509 DEBUG [hconnection-0x7490d2c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,510 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,510 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,510 DEBUG [hconnection-0x77bff0fe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,512 DEBUG [hconnection-0x6425c3d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,512 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,512 DEBUG [hconnection-0xa1815ef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,512 DEBUG [hconnection-0x4cb3ba0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,513 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,513 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51580, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-20T19:26:09,513 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,513 DEBUG [hconnection-0x495c83bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:09,514 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:09,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:09,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:09,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:09,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:09,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:09,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:09,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:09,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:09,544 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c8c56cc5f4204ae3bbe998eeee2e5c2d is 50, key is test_row_0/A:col10/1732130769511/Put/seqid=0 2024-11-20T19:26:09,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130829540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130829541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130829542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130829543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130829545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742079_1255 (size=12001) 2024-11-20T19:26:09,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c8c56cc5f4204ae3bbe998eeee2e5c2d 2024-11-20T19:26:09,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:09,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9e81353f330a4f4b904d723b94f20194 is 50, key is test_row_0/B:col10/1732130769511/Put/seqid=0 2024-11-20T19:26:09,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742080_1256 (size=12001) 2024-11-20T19:26:09,629 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9e81353f330a4f4b904d723b94f20194 2024-11-20T19:26:09,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130829647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130829647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130829647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130829647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/34d05b6c713747e382779114be320326 is 50, key is test_row_0/C:col10/1732130769511/Put/seqid=0 2024-11-20T19:26:09,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130829650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742081_1257 (size=12001) 2024-11-20T19:26:09,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:09,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:09,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:09,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:09,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
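The repeated RegionTooBusyException warnings above are memstore backpressure: puts are rejected while the region is over its 512 KB memstore limit and a flush is in progress (the FlushRegionProcedure itself reports "NOT flushing ... as already flushing"). A minimal client-side sketch of handling that backpressure with retry and backoff follows; the table, row, and column names are taken from the log, the retry policy is an assumption for illustration, and whether the exception surfaces directly or wrapped depends on the client's retry settings (this test runs its RPC clients with maxRetries=0):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);           // may be rejected while the memstore is over its limit
          return;                   // write accepted
        } catch (IOException e) {
          // With retries disabled (as in this test) the server's RegionTooBusyException
          // ("Over memstore limit") reaches the caller; with default settings it may
          // instead arrive wrapped in a RetriesExhaustedException.
          Thread.sleep(backoffMs);  // give the in-flight flush time to drain the memstore
          backoffMs *= 2;           // exponential backoff before the next attempt
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }
}
```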
2024-11-20T19:26:09,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:09,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:09,813 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:09,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:09,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:09,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:09,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:09,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:09,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:09,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130829850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130829850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130829850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130829851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:09,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130829854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,966 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:09,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:09,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:09,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:09,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:10,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/34d05b6c713747e382779114be320326 2024-11-20T19:26:10,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c8c56cc5f4204ae3bbe998eeee2e5c2d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c8c56cc5f4204ae3bbe998eeee2e5c2d 2024-11-20T19:26:10,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c8c56cc5f4204ae3bbe998eeee2e5c2d, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:26:10,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9e81353f330a4f4b904d723b94f20194 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9e81353f330a4f4b904d723b94f20194 2024-11-20T19:26:10,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9e81353f330a4f4b904d723b94f20194, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:26:10,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/34d05b6c713747e382779114be320326 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326 2024-11-20T19:26:10,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326, entries=150, sequenceid=13, filesize=11.7 K 2024-11-20T19:26:10,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 05d8fd611fc9337dfa63e932920aeaaa in 559ms, sequenceid=13, compaction requested=false 2024-11-20T19:26:10,076 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T19:26:10,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:10,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:10,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-20T19:26:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:10,119 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:26:10,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:10,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:10,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/931caa3bf796422688da3db7d1e4a6fd is 50, key is test_row_0/A:col10/1732130769544/Put/seqid=0 2024-11-20T19:26:10,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742082_1258 (size=12001) 2024-11-20T19:26:10,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:10,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:10,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130830160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130830160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130830161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130830162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130830164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130830265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130830265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130830265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130830265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130830269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130830468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130830468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130830468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130830470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130830472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,529 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/931caa3bf796422688da3db7d1e4a6fd 2024-11-20T19:26:10,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d5e1b82353064570bb3b81f54a986392 is 50, key is test_row_0/B:col10/1732130769544/Put/seqid=0 2024-11-20T19:26:10,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742083_1259 (size=12001) 2024-11-20T19:26:10,543 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d5e1b82353064570bb3b81f54a986392 2024-11-20T19:26:10,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/b6a63137f3d94eb78da88d5848b9c589 is 50, key is test_row_0/C:col10/1732130769544/Put/seqid=0 2024-11-20T19:26:10,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742084_1260 (size=12001) 2024-11-20T19:26:10,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:10,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130830770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130830770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130830771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130830774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:10,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130830775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:10,957 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/b6a63137f3d94eb78da88d5848b9c589 2024-11-20T19:26:10,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/931caa3bf796422688da3db7d1e4a6fd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/931caa3bf796422688da3db7d1e4a6fd 2024-11-20T19:26:10,967 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/931caa3bf796422688da3db7d1e4a6fd, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:26:10,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d5e1b82353064570bb3b81f54a986392 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d5e1b82353064570bb3b81f54a986392 2024-11-20T19:26:10,972 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d5e1b82353064570bb3b81f54a986392, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:26:10,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/b6a63137f3d94eb78da88d5848b9c589 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b6a63137f3d94eb78da88d5848b9c589 2024-11-20T19:26:10,976 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b6a63137f3d94eb78da88d5848b9c589, entries=150, sequenceid=37, filesize=11.7 K 2024-11-20T19:26:10,977 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 05d8fd611fc9337dfa63e932920aeaaa in 858ms, sequenceid=37, compaction requested=false 2024-11-20T19:26:10,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:10,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:10,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-20T19:26:10,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-20T19:26:10,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-20T19:26:10,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4700 sec 2024-11-20T19:26:10,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.4750 sec 2024-11-20T19:26:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:11,278 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:11,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:11,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:11,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:11,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:11,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:11,278 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:11,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/73017182bc454e0e9413c7eb49a148a1 is 50, key is test_row_0/A:col10/1732130771277/Put/seqid=0 2024-11-20T19:26:11,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742085_1261 (size=12001) 2024-11-20T19:26:11,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/73017182bc454e0e9413c7eb49a148a1 2024-11-20T19:26:11,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e5a9d1df656643fe9fd4122bf9b72a8d is 50, key is test_row_0/B:col10/1732130771277/Put/seqid=0 2024-11-20T19:26:11,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130831316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130831317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130831317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742086_1262 (size=12001) 2024-11-20T19:26:11,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130831322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130831324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,372 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:26:11,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130831425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130831425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130831425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130831432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130831432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-20T19:26:11,610 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-20T19:26:11,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:11,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-11-20T19:26:11,612 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:11,613 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:11,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:11,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:11,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130831628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130831629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130831630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130831635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130831636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:11,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e5a9d1df656643fe9fd4122bf9b72a8d 2024-11-20T19:26:11,737 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/e204a7992fa84bd8a53ac95f9f238dd8 is 50, key is test_row_0/C:col10/1732130771277/Put/seqid=0 2024-11-20T19:26:11,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742087_1263 (size=12001) 2024-11-20T19:26:11,764 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T19:26:11,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:11,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:11,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:11,765 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:11,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:11,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:11,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:11,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T19:26:11,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:11,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:11,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:11,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:11,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:11,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:11,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130831930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130831932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130831934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130831937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:11,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:11,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130831939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T19:26:12,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:12,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:12,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:12,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:12,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:12,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:12,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/e204a7992fa84bd8a53ac95f9f238dd8 2024-11-20T19:26:12,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/73017182bc454e0e9413c7eb49a148a1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/73017182bc454e0e9413c7eb49a148a1 2024-11-20T19:26:12,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/73017182bc454e0e9413c7eb49a148a1, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:26:12,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e5a9d1df656643fe9fd4122bf9b72a8d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e5a9d1df656643fe9fd4122bf9b72a8d 2024-11-20T19:26:12,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e5a9d1df656643fe9fd4122bf9b72a8d, entries=150, sequenceid=51, 
filesize=11.7 K 2024-11-20T19:26:12,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/e204a7992fa84bd8a53ac95f9f238dd8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/e204a7992fa84bd8a53ac95f9f238dd8 2024-11-20T19:26:12,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/e204a7992fa84bd8a53ac95f9f238dd8, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:26:12,163 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 05d8fd611fc9337dfa63e932920aeaaa in 886ms, sequenceid=51, compaction requested=true 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:12,163 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:12,163 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:12,163 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:12,164 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:12,164 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:12,164 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 
05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:12,164 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:12,164 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:12,164 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:12,164 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9e81353f330a4f4b904d723b94f20194, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d5e1b82353064570bb3b81f54a986392, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e5a9d1df656643fe9fd4122bf9b72a8d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.2 K 2024-11-20T19:26:12,164 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c8c56cc5f4204ae3bbe998eeee2e5c2d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/931caa3bf796422688da3db7d1e4a6fd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/73017182bc454e0e9413c7eb49a148a1] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.2 K 2024-11-20T19:26:12,165 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8c56cc5f4204ae3bbe998eeee2e5c2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130769511 2024-11-20T19:26:12,165 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e81353f330a4f4b904d723b94f20194, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130769511 2024-11-20T19:26:12,165 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d5e1b82353064570bb3b81f54a986392, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130769538 2024-11-20T19:26:12,165 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 931caa3bf796422688da3db7d1e4a6fd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130769538 2024-11-20T19:26:12,165 DEBUG 
[RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e5a9d1df656643fe9fd4122bf9b72a8d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130770159 2024-11-20T19:26:12,165 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73017182bc454e0e9413c7eb49a148a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130770159 2024-11-20T19:26:12,185 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#210 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:12,185 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#211 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:12,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/43f5e6fb8704408d9513b67cd577d03a is 50, key is test_row_0/A:col10/1732130771277/Put/seqid=0 2024-11-20T19:26:12,185 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/f8649629e61e465887d292d0db02fd03 is 50, key is test_row_0/B:col10/1732130771277/Put/seqid=0 2024-11-20T19:26:12,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742088_1264 (size=12104) 2024-11-20T19:26:12,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742089_1265 (size=12104) 2024-11-20T19:26:12,197 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/f8649629e61e465887d292d0db02fd03 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f8649629e61e465887d292d0db02fd03 2024-11-20T19:26:12,201 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into f8649629e61e465887d292d0db02fd03(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
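
The repeated "Over memstore limit=512.0 K" rejections in this stretch are thrown from HRegion.checkResources(), which refuses new writes once a region's memstore passes the configured flush size multiplied by the block multiplier. The minimal Java sketch below shows how that blocking threshold is derived from the standard HBase configuration keys; the 512 K limit seen in this run implies the test overrides hbase.hregion.memstore.flush.size to something far below the 128 MB default, and since that override is not visible in this excerpt, the concrete numbers in the sketch are assumptions for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Per-region flush threshold; HBase ships with a 128 MB default.
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Multiplier applied to the flush size before writes are blocked; 4 in recent defaults.
            long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
            // Writes to a region are rejected with RegionTooBusyException once its memstore
            // grows past this product (512 K in the run logged above, which implies a
            // deliberately tiny flush size in the test configuration; that is an assumption
            // since the override itself does not appear in this log excerpt).
            long blockingLimit = flushSize * blockMultiplier;
            System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
        }
    }

Raising either value postpones the RegionTooBusyException at the cost of larger flushes; the test presumably keeps the limit tiny to force the flush and compaction activity recorded above.
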
2024-11-20T19:26:12,201 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:12,201 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130772163; duration=0sec 2024-11-20T19:26:12,201 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:12,201 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:12,201 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:12,203 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:12,203 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:12,203 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:12,203 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b6a63137f3d94eb78da88d5848b9c589, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/e204a7992fa84bd8a53ac95f9f238dd8] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.2 K 2024-11-20T19:26:12,203 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 34d05b6c713747e382779114be320326, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732130769511 2024-11-20T19:26:12,204 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b6a63137f3d94eb78da88d5848b9c589, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732130769538 2024-11-20T19:26:12,204 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e204a7992fa84bd8a53ac95f9f238dd8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130770159 2024-11-20T19:26:12,214 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
05d8fd611fc9337dfa63e932920aeaaa#C#compaction#212 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:12,215 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/377bb4af2e504b4da8c8e0ac4c5c59b4 is 50, key is test_row_0/C:col10/1732130771277/Put/seqid=0 2024-11-20T19:26:12,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:12,223 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-11-20T19:26:12,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:12,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742090_1266 (size=12104) 2024-11-20T19:26:12,224 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:26:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:12,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/75fb8bbc8b8e4bb2a63ccc3b731351de is 50, key is test_row_0/A:col10/1732130771310/Put/seqid=0 2024-11-20T19:26:12,233 DEBUG 
[RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/377bb4af2e504b4da8c8e0ac4c5c59b4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/377bb4af2e504b4da8c8e0ac4c5c59b4 2024-11-20T19:26:12,240 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 377bb4af2e504b4da8c8e0ac4c5c59b4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:12,240 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:12,240 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130772163; duration=0sec 2024-11-20T19:26:12,240 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:12,240 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:12,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742091_1267 (size=12001) 2024-11-20T19:26:12,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:12,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:12,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130832444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130832445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130832445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130832446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130832447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130832548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130832549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130832549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130832549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130832551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,595 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/43f5e6fb8704408d9513b67cd577d03a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/43f5e6fb8704408d9513b67cd577d03a 2024-11-20T19:26:12,599 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into 43f5e6fb8704408d9513b67cd577d03a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
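
Each of the ipc.CallRunner entries above records a client Mutate being bounced with RegionTooBusyException while the flush for pid=64 is still draining the memstore. The HBase client normally absorbs these through its own retry and backoff machinery, so the hand-rolled loop below is only an illustrative sketch of how a caller could react to the exception: the table, row, family, and qualifier names are taken from the log, while the attempt cap and backoff values are arbitrary assumptions and not something the test itself does.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryOnBusyRegion {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row, family, and qualifier mirror the cells seen in the log
                // (key is test_row_0/A:col10/...); the value is a placeholder.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempts = 0;
                while (true) {
                    try {
                        table.put(put);   // the Mutate call seen in the CallRunner entries
                        break;            // write accepted, stop retrying
                    } catch (RegionTooBusyException e) {
                        // The region's memstore is over its blocking limit; back off and let
                        // the pending flush/compaction drain it before retrying. The cap of
                        // 10 attempts and the linear backoff are arbitrary illustrative choices.
                        if (++attempts > 10) {
                            throw e;
                        }
                        Thread.sleep(100L * attempts);
                    }
                }
            }
        }
    }
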
2024-11-20T19:26:12,599 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:12,599 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130772163; duration=0sec 2024-11-20T19:26:12,599 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:12,600 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:12,642 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/75fb8bbc8b8e4bb2a63ccc3b731351de 2024-11-20T19:26:12,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/7cc65a8b12a544e1ae6e692d6037b4de is 50, key is test_row_0/B:col10/1732130771310/Put/seqid=0 2024-11-20T19:26:12,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742092_1268 (size=12001) 2024-11-20T19:26:12,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:12,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130832750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130832751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130832751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130832751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:12,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:12,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130832754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130833053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130833054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130833054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130833057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130833058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,060 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/7cc65a8b12a544e1ae6e692d6037b4de 2024-11-20T19:26:13,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/4a0cbacaba0a448e9a058db726e9330a is 50, key is test_row_0/C:col10/1732130771310/Put/seqid=0 2024-11-20T19:26:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742093_1269 (size=12001) 2024-11-20T19:26:13,508 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/4a0cbacaba0a448e9a058db726e9330a 2024-11-20T19:26:13,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/75fb8bbc8b8e4bb2a63ccc3b731351de as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/75fb8bbc8b8e4bb2a63ccc3b731351de 2024-11-20T19:26:13,517 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/75fb8bbc8b8e4bb2a63ccc3b731351de, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T19:26:13,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/7cc65a8b12a544e1ae6e692d6037b4de as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7cc65a8b12a544e1ae6e692d6037b4de 2024-11-20T19:26:13,524 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7cc65a8b12a544e1ae6e692d6037b4de, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T19:26:13,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/4a0cbacaba0a448e9a058db726e9330a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/4a0cbacaba0a448e9a058db726e9330a 2024-11-20T19:26:13,529 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/4a0cbacaba0a448e9a058db726e9330a, entries=150, sequenceid=74, filesize=11.7 K 2024-11-20T19:26:13,530 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 05d8fd611fc9337dfa63e932920aeaaa in 1306ms, sequenceid=74, compaction requested=false 2024-11-20T19:26:13,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:13,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
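
The RegionTooBusyException storm in the entries above comes from HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K in this run); the rejections subside once the flush just logged here completes. In HBase that blocking limit is the per-region memstore flush size multiplied by the block multiplier. A minimal sketch of that relationship, assuming example values (the actual flush size and multiplier used by this test are not visible in the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 KB flush size with a 4x block multiplier yields the
    // 512 KB limit quoted in the "Over memstore limit=512.0 K" messages above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit =
        conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit (bytes): " + blockingLimit); // 524288
  }
}

Writes against the region keep failing with RegionTooBusyException until a flush brings the memstore back under this limit, which is exactly the alternating WARN/DEBUG pattern in the surrounding entries.
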
2024-11-20T19:26:13,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-20T19:26:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-11-20T19:26:13,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-20T19:26:13,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9180 sec 2024-11-20T19:26:13,534 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.9220 sec 2024-11-20T19:26:13,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:13,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:26:13,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:13,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:13,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:13,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:13,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:13,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:13,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/2112fa7dac4041a4a771c2d85fc1e3bf is 50, key is test_row_0/A:col10/1732130772443/Put/seqid=0 2024-11-20T19:26:13,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742094_1270 (size=12001) 2024-11-20T19:26:13,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130833574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130833575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130833576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130833578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130833579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130833680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130833680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130833680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130833682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130833684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-20T19:26:13,719 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-20T19:26:13,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:13,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-11-20T19:26:13,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T19:26:13,721 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:13,722 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:13,722 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=65 2024-11-20T19:26:13,872 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,873 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:13,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:13,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:13,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:13,874 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:13,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
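
The pid=65/pid=66 entries above show the master-side FlushTableProcedure fanning a FlushRegionProcedure out to the region server, which declines it ("NOT flushing ... as already flushing"), so the sub-procedure is reported as failed and later re-dispatched. The client side of this exchange is the request logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". A minimal sketch of that client call, assuming an HBase site configuration is available on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableClient {
  public static void main(String[] args) throws Exception {
    // Assumes connection settings (ZooKeeper quorum, ports) come from hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; the master runs it as a
      // FlushTableProcedure and the client polls until the procedure is done,
      // matching the "Checking to see if procedure is done" entries in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
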
2024-11-20T19:26:13,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130833882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130833882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130833883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130833885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:13,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130833886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:13,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/2112fa7dac4041a4a771c2d85fc1e3bf 2024-11-20T19:26:13,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8bbd64bbd6984190a786cd601e13bce2 is 50, key is test_row_0/B:col10/1732130772443/Put/seqid=0 2024-11-20T19:26:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742095_1271 (size=12001) 2024-11-20T19:26:14,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T19:26:14,025 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
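
Each rejected Mutate in the ipc.CallRunner entries is returned to the caller at 172.17.0.2, which backs off and retries. A rough sketch of such a writer follows; the table, family, and qualifier match the log (TestAcidGuarantees, A:col10, test_row_0), but the value and the explicit retry loop are illustrative assumptions, and depending on client retry settings the busy-region error may surface wrapped in a retries-exhausted exception rather than directly:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Typically RegionTooBusyException (possibly wrapped) while the region's
          // memstore is over its blocking limit; back off and try again after the flush.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}
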
2024-11-20T19:26:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,026 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:14,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,178 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
as already flushing 2024-11-20T19:26:14,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130834185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130834185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130834187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130834189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130834190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T19:26:14,330 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8bbd64bbd6984190a786cd601e13bce2 2024-11-20T19:26:14,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/ad3d676ee9f04fa1a1b8cf8af1441bf2 is 50, key is test_row_0/C:col10/1732130772443/Put/seqid=0 2024-11-20T19:26:14,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742096_1272 (size=12001) 2024-11-20T19:26:14,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
as already flushing 2024-11-20T19:26:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:14,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130834688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130834688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130834693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130834693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:14,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130834696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,790 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:14,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:14,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/ad3d676ee9f04fa1a1b8cf8af1441bf2 2024-11-20T19:26:14,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/2112fa7dac4041a4a771c2d85fc1e3bf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2112fa7dac4041a4a771c2d85fc1e3bf 2024-11-20T19:26:14,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2112fa7dac4041a4a771c2d85fc1e3bf, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:26:14,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8bbd64bbd6984190a786cd601e13bce2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8bbd64bbd6984190a786cd601e13bce2 2024-11-20T19:26:14,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8bbd64bbd6984190a786cd601e13bce2, entries=150, sequenceid=91, 
filesize=11.7 K 2024-11-20T19:26:14,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/ad3d676ee9f04fa1a1b8cf8af1441bf2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ad3d676ee9f04fa1a1b8cf8af1441bf2 2024-11-20T19:26:14,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T19:26:14,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ad3d676ee9f04fa1a1b8cf8af1441bf2, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:26:14,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 05d8fd611fc9337dfa63e932920aeaaa in 1268ms, sequenceid=91, compaction requested=true 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:14,827 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:14,827 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:14,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:14,828 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:14,828 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:14,828 DEBUG 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:14,828 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,828 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:14,828 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,828 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f8649629e61e465887d292d0db02fd03, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7cc65a8b12a544e1ae6e692d6037b4de, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8bbd64bbd6984190a786cd601e13bce2] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.3 K 2024-11-20T19:26:14,828 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/43f5e6fb8704408d9513b67cd577d03a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/75fb8bbc8b8e4bb2a63ccc3b731351de, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2112fa7dac4041a4a771c2d85fc1e3bf] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.3 K 2024-11-20T19:26:14,829 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f8649629e61e465887d292d0db02fd03, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130770159 2024-11-20T19:26:14,829 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43f5e6fb8704408d9513b67cd577d03a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130770159 2024-11-20T19:26:14,829 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75fb8bbc8b8e4bb2a63ccc3b731351de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732130771310 2024-11-20T19:26:14,830 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cc65a8b12a544e1ae6e692d6037b4de, 
keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732130771310 2024-11-20T19:26:14,830 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2112fa7dac4041a4a771c2d85fc1e3bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130772443 2024-11-20T19:26:14,830 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bbd64bbd6984190a786cd601e13bce2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130772443 2024-11-20T19:26:14,857 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:14,857 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/dd7371fd85f0483c9f7f98876cbf97a4 is 50, key is test_row_0/B:col10/1732130772443/Put/seqid=0 2024-11-20T19:26:14,870 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#220 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:14,871 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/dc0e7a722f1945c7acfd91186cdfd87e is 50, key is test_row_0/A:col10/1732130772443/Put/seqid=0 2024-11-20T19:26:14,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742097_1273 (size=12207) 2024-11-20T19:26:14,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:14,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-11-20T19:26:14,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
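The HFileWriterImpl lines above print the largest cell being written to each temporary store file, for example "key is test_row_0/A:col10/1732130772443/Put/seqid=0", which decodes as row test_row_0, column family A, qualifier col10, the write timestamp, and the Put type. As an illustrative reconstruction (not the actual test source), a client write that produces a cell with that shape would look roughly like this, assuming a running cluster reachable through the default configuration:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class PutSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        // Row, family, and qualifier mirror the cell key printed by HFileWriterImpl above;
        // the value is arbitrary here.
        Put put = new Put(Bytes.toBytes("test_row_0"));
        put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
        table.put(put); // lands in the memstore and is later flushed to an HFile like those above
      }
    }
  }
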
2024-11-20T19:26:14,946 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:26:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:14,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:14,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742098_1274 (size=12207) 2024-11-20T19:26:14,959 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/dc0e7a722f1945c7acfd91186cdfd87e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/dc0e7a722f1945c7acfd91186cdfd87e 2024-11-20T19:26:14,965 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into dc0e7a722f1945c7acfd91186cdfd87e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
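The SortedCompactionPolicy / ExploringCompactionPolicy lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"; "selected 3 files of size 36106") reflect the standard minor-compaction selection knobs: selection fires once at least hbase.hstore.compaction.min eligible files exist (3 by default, which matches the three flushed files here), candidate sets are scored against hbase.hstore.compaction.ratio, and the "16 blocking" figure is hbase.hstore.blockingStoreFiles. A minimal sketch of setting these keys, with values that simply mirror the usual defaults for illustration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionTuningSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Minimum number of eligible store files before a minor compaction is selected
      conf.setInt("hbase.hstore.compaction.min", 3);
      // Upper bound on the number of files folded into one compaction
      conf.setInt("hbase.hstore.compaction.max", 10);
      // Size ratio used by ExploringCompactionPolicy when scoring candidate sets
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
      // Store-file count at which the region starts blocking writes ("16 blocking" above)
      conf.setInt("hbase.hstore.blockingStoreFiles", 16);
      System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
  }
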
2024-11-20T19:26:14,965 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:14,965 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130774827; duration=0sec 2024-11-20T19:26:14,965 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:14,965 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:14,965 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:14,966 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:14,967 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:14,967 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:14,967 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/377bb4af2e504b4da8c8e0ac4c5c59b4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/4a0cbacaba0a448e9a058db726e9330a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ad3d676ee9f04fa1a1b8cf8af1441bf2] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.3 K 2024-11-20T19:26:14,968 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 377bb4af2e504b4da8c8e0ac4c5c59b4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130770159 2024-11-20T19:26:14,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/6989f3b6aa454e208badaf3c9a06b4bc is 50, key is test_row_0/A:col10/1732130773575/Put/seqid=0 2024-11-20T19:26:14,970 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a0cbacaba0a448e9a058db726e9330a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=74, earliestPutTs=1732130771310 2024-11-20T19:26:14,971 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad3d676ee9f04fa1a1b8cf8af1441bf2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130772443 2024-11-20T19:26:15,007 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#222 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:15,008 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/70b3aeec342a4cfea8b4c2e53a0c3bc7 is 50, key is test_row_0/C:col10/1732130772443/Put/seqid=0 2024-11-20T19:26:15,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742099_1275 (size=12001) 2024-11-20T19:26:15,014 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/6989f3b6aa454e208badaf3c9a06b4bc 2024-11-20T19:26:15,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742100_1276 (size=12207) 2024-11-20T19:26:15,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/bfdaa9a083a647a2bc57e571475cac54 is 50, key is test_row_0/B:col10/1732130773575/Put/seqid=0 2024-11-20T19:26:15,059 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/70b3aeec342a4cfea8b4c2e53a0c3bc7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/70b3aeec342a4cfea8b4c2e53a0c3bc7 2024-11-20T19:26:15,066 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 70b3aeec342a4cfea8b4c2e53a0c3bc7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
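The PressureAwareThroughputController lines above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") come from the compaction throughput limiter: as I understand the stock controller, the limit sits at the lower bound while there is no memstore pressure and scales toward the higher bound as pressure grows, and the 50 MB/s figure is consistent with the usual lower-bound default on this branch, though defaults differ across versions. A sketch of the two bound settings (key names as used by the pressure-aware controller; values here are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionThroughputSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Limit applied when there is no pressure ("total limit is 50.00 MB/second" above)
      conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      // Limit the controller ramps up to as pressure increases
      conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
      System.out.println(conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1));
    }
  }
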
2024-11-20T19:26:15,066 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:15,066 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130774827; duration=0sec 2024-11-20T19:26:15,066 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:15,066 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:15,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742101_1277 (size=12001) 2024-11-20T19:26:15,111 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/bfdaa9a083a647a2bc57e571475cac54 2024-11-20T19:26:15,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38406260cacb4519b4243ec3430d97dc is 50, key is test_row_0/C:col10/1732130773575/Put/seqid=0 2024-11-20T19:26:15,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742102_1278 (size=12001) 2024-11-20T19:26:15,146 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=112 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38406260cacb4519b4243ec3430d97dc 2024-11-20T19:26:15,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/6989f3b6aa454e208badaf3c9a06b4bc as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/6989f3b6aa454e208badaf3c9a06b4bc 2024-11-20T19:26:15,159 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/6989f3b6aa454e208badaf3c9a06b4bc, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T19:26:15,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 
{event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/bfdaa9a083a647a2bc57e571475cac54 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/bfdaa9a083a647a2bc57e571475cac54 2024-11-20T19:26:15,168 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/bfdaa9a083a647a2bc57e571475cac54, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T19:26:15,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38406260cacb4519b4243ec3430d97dc as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38406260cacb4519b4243ec3430d97dc 2024-11-20T19:26:15,176 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38406260cacb4519b4243ec3430d97dc, entries=150, sequenceid=112, filesize=11.7 K 2024-11-20T19:26:15,177 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=0 B/0 for 05d8fd611fc9337dfa63e932920aeaaa in 232ms, sequenceid=112, compaction requested=false 2024-11-20T19:26:15,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:15,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
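The FlushRegionCallable and FlushTableProcedure traffic in this run (pid=65/66 finishing just below, pid=67/68 queued shortly after) is the server-side half of an administrative table flush: the master stores a FlushTableProcedure, fans out FlushRegionProcedure subprocedures, and each region server executes FlushRegionCallable. A minimal client-side sketch of issuing such a flush with the standard Admin API, using the table name from this test:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class FlushSketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        // Blocks until the flush procedure (like pid=65/67 in this log) has completed.
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }
  }
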
2024-11-20T19:26:15,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-11-20T19:26:15,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-11-20T19:26:15,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-20T19:26:15,181 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4570 sec 2024-11-20T19:26:15,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.4620 sec 2024-11-20T19:26:15,335 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/dd7371fd85f0483c9f7f98876cbf97a4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd7371fd85f0483c9f7f98876cbf97a4 2024-11-20T19:26:15,343 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into dd7371fd85f0483c9f7f98876cbf97a4(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:15,343 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:15,343 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130774827; duration=0sec 2024-11-20T19:26:15,343 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:15,343 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:15,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:15,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:15,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:15,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:15,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:15,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c19d89fd2b35492ebb2c01b973653e7f is 50, key is test_row_0/A:col10/1732130775703/Put/seqid=0 2024-11-20T19:26:15,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130835728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130835729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130835730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130835730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130835732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742103_1279 (size=12001) 2024-11-20T19:26:15,759 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c19d89fd2b35492ebb2c01b973653e7f 2024-11-20T19:26:15,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b3ca0d2390364c529c972ee348e60d2d is 50, key is test_row_0/B:col10/1732130775703/Put/seqid=0 2024-11-20T19:26:15,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742104_1280 (size=12001) 2024-11-20T19:26:15,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b3ca0d2390364c529c972ee348e60d2d 2024-11-20T19:26:15,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d6483ddc42fa46559c59603d7300cb0e is 50, key is test_row_0/C:col10/1732130775703/Put/seqid=0 2024-11-20T19:26:15,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-20T19:26:15,825 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-20T19:26:15,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:15,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-11-20T19:26:15,827 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:15,828 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:15,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:15,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:15,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130835834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130835834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130835835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130835835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:15,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130835836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742105_1281 (size=12001) 2024-11-20T19:26:15,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d6483ddc42fa46559c59603d7300cb0e 2024-11-20T19:26:15,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c19d89fd2b35492ebb2c01b973653e7f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c19d89fd2b35492ebb2c01b973653e7f 2024-11-20T19:26:15,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c19d89fd2b35492ebb2c01b973653e7f, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T19:26:15,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b3ca0d2390364c529c972ee348e60d2d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b3ca0d2390364c529c972ee348e60d2d 2024-11-20T19:26:15,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b3ca0d2390364c529c972ee348e60d2d, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T19:26:15,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d6483ddc42fa46559c59603d7300cb0e as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d6483ddc42fa46559c59603d7300cb0e 2024-11-20T19:26:15,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d6483ddc42fa46559c59603d7300cb0e, entries=150, sequenceid=128, filesize=11.7 K 2024-11-20T19:26:15,873 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 05d8fd611fc9337dfa63e932920aeaaa in 168ms, sequenceid=128, compaction requested=true 2024-11-20T19:26:15,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:15,874 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:15,875 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:15,875 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:15,875 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
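The repeated RegionTooBusyException warnings earlier ("Over memstore limit=512.0 K") are the region refusing new mutations once its memstore exceeds the blocking limit, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512 K figure suggests this test runs with a deliberately tiny flush size, an inference from the number rather than something the log states. The flush that just finished above is what relieves that pressure. The stock client normally retries this exception internally until the operation timeout (often surfacing it wrapped in a retries-exhausted exception), but an explicit application-level retry, assuming client retries are dialed down far enough for the raw exception to reach the caller, might look like this sketch (names and values are illustrative, not from the test):

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.RegionTooBusyException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
        Put put = new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
        long backoffMs = 100;
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);
            break;                      // write accepted
          } catch (RegionTooBusyException e) {
            if (attempt >= 5) throw e;  // give up after a few attempts
            Thread.sleep(backoffMs);    // wait for the memstore flush to catch up
            backoffMs *= 2;             // exponential backoff
          }
        }
      }
    }
  }
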
2024-11-20T19:26:15,875 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/dc0e7a722f1945c7acfd91186cdfd87e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/6989f3b6aa454e208badaf3c9a06b4bc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c19d89fd2b35492ebb2c01b973653e7f] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.4 K 2024-11-20T19:26:15,875 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc0e7a722f1945c7acfd91186cdfd87e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130772443 2024-11-20T19:26:15,876 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6989f3b6aa454e208badaf3c9a06b4bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732130773575 2024-11-20T19:26:15,876 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c19d89fd2b35492ebb2c01b973653e7f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732130775703 2024-11-20T19:26:15,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:15,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:15,882 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:15,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:15,883 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:15,883 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:15,883 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
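The back-to-back minor compactions of stores A and B being selected above are system-triggered (priority=-2147483648, requested by MemStoreFlusher.0), but the same work can also be requested explicitly through the Admin API, which can be useful when reproducing compaction behaviour outside a test harness. A minimal sketch using the standard HBase 2.x Admin methods and the table name from this test:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.CompactionState;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CompactSketch {
    public static void main(String[] args) throws Exception {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        admin.compact(tn);        // queue a minor compaction for every store of the table
        // admin.majorCompact(tn); // or rewrite all store files in one pass
        // Compaction runs asynchronously; poll its state if the caller needs to wait.
        while (admin.getCompactionState(tn) != CompactionState.NONE) {
          Thread.sleep(200);
        }
      }
    }
  }
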
2024-11-20T19:26:15,884 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd7371fd85f0483c9f7f98876cbf97a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/bfdaa9a083a647a2bc57e571475cac54, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b3ca0d2390364c529c972ee348e60d2d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.4 K 2024-11-20T19:26:15,884 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting dd7371fd85f0483c9f7f98876cbf97a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130772443 2024-11-20T19:26:15,884 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting bfdaa9a083a647a2bc57e571475cac54, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732130773575 2024-11-20T19:26:15,884 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b3ca0d2390364c529c972ee348e60d2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732130775703 2024-11-20T19:26:15,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:15,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:15,885 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:15,887 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#228 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:15,887 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/e748ea77912e410f8bd97934806d6d18 is 50, key is test_row_0/A:col10/1732130775703/Put/seqid=0 2024-11-20T19:26:15,905 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:15,906 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d83471f4418c4053b1d7ff3aa7d13cbb is 50, key is test_row_0/B:col10/1732130775703/Put/seqid=0 2024-11-20T19:26:15,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:15,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742106_1282 (size=12309) 2024-11-20T19:26:15,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742107_1283 (size=12309) 2024-11-20T19:26:15,980 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:15,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-11-20T19:26:15,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:15,981 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:26:15,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:15,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:15,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:15,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:15,987 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d83471f4418c4053b1d7ff3aa7d13cbb as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d83471f4418c4053b1d7ff3aa7d13cbb 2024-11-20T19:26:15,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/a07c26eaeaa04655bb862778399582c0 is 50, key is test_row_0/A:col10/1732130775730/Put/seqid=0 2024-11-20T19:26:16,000 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into d83471f4418c4053b1d7ff3aa7d13cbb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:16,000 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:16,000 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130775880; duration=0sec 2024-11-20T19:26:16,000 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:16,000 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:16,000 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:16,010 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:16,010 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:16,011 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:16,011 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/70b3aeec342a4cfea8b4c2e53a0c3bc7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38406260cacb4519b4243ec3430d97dc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d6483ddc42fa46559c59603d7300cb0e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.4 K 2024-11-20T19:26:16,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 70b3aeec342a4cfea8b4c2e53a0c3bc7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130772443 2024-11-20T19:26:16,012 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 38406260cacb4519b4243ec3430d97dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=112, earliestPutTs=1732130773575 2024-11-20T19:26:16,013 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d6483ddc42fa46559c59603d7300cb0e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732130775703 2024-11-20T19:26:16,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742108_1284 (size=12151) 2024-11-20T19:26:16,023 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/a07c26eaeaa04655bb862778399582c0 2024-11-20T19:26:16,035 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#231 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:16,035 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/7e8c5abd725146c18fc1b48e6d492388 is 50, key is test_row_0/C:col10/1732130775703/Put/seqid=0 2024-11-20T19:26:16,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:16,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
as already flushing 2024-11-20T19:26:16,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9c47e0fad07942af82643097320bcc46 is 50, key is test_row_0/B:col10/1732130775730/Put/seqid=0 2024-11-20T19:26:16,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742110_1286 (size=12151) 2024-11-20T19:26:16,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130836053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,063 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9c47e0fad07942af82643097320bcc46 2024-11-20T19:26:16,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130836055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130836059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130836072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130836072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742109_1285 (size=12309) 2024-11-20T19:26:16,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/8a2c92b3e91f4fba9f6e8906ebebee3c is 50, key is test_row_0/C:col10/1732130775730/Put/seqid=0 2024-11-20T19:26:16,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742111_1287 (size=12151) 2024-11-20T19:26:16,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:16,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130836164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130836168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130836173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130836181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130836181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,371 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/e748ea77912e410f8bd97934806d6d18 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/e748ea77912e410f8bd97934806d6d18 2024-11-20T19:26:16,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130836369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130836372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,379 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into e748ea77912e410f8bd97934806d6d18(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:16,379 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:16,379 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130775873; duration=0sec 2024-11-20T19:26:16,380 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:16,380 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:16,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130836377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130836385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130836385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:16,495 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/7e8c5abd725146c18fc1b48e6d492388 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7e8c5abd725146c18fc1b48e6d492388 2024-11-20T19:26:16,501 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 7e8c5abd725146c18fc1b48e6d492388(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:16,501 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:16,501 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130775885; duration=0sec 2024-11-20T19:26:16,501 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:16,501 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:16,512 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/8a2c92b3e91f4fba9f6e8906ebebee3c 2024-11-20T19:26:16,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/a07c26eaeaa04655bb862778399582c0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a07c26eaeaa04655bb862778399582c0 2024-11-20T19:26:16,522 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a07c26eaeaa04655bb862778399582c0, entries=150, sequenceid=151, filesize=11.9 K 2024-11-20T19:26:16,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9c47e0fad07942af82643097320bcc46 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9c47e0fad07942af82643097320bcc46 2024-11-20T19:26:16,529 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9c47e0fad07942af82643097320bcc46, entries=150, sequenceid=151, filesize=11.9 K 2024-11-20T19:26:16,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/8a2c92b3e91f4fba9f6e8906ebebee3c as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8a2c92b3e91f4fba9f6e8906ebebee3c 2024-11-20T19:26:16,537 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8a2c92b3e91f4fba9f6e8906ebebee3c, entries=150, sequenceid=151, filesize=11.9 K 2024-11-20T19:26:16,538 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 05d8fd611fc9337dfa63e932920aeaaa in 557ms, sequenceid=151, compaction requested=false 2024-11-20T19:26:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-20T19:26:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-11-20T19:26:16,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-20T19:26:16,540 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 711 msec 2024-11-20T19:26:16,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 715 msec 2024-11-20T19:26:16,676 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:26:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:16,677 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:16,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:16,685 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/f2d2b20c38534d2180b8356c36bd8cb5 is 50, key is test_row_0/A:col10/1732130776053/Put/seqid=0 2024-11-20T19:26:16,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130836705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130836705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130836708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130836711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130836712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742112_1288 (size=12151) 2024-11-20T19:26:16,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130836813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130836814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130836815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130836818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:16,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130836819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:16,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-20T19:26:16,931 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-20T19:26:16,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:17,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130837017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130837020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130837021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130837022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130837023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-20T19:26:17,085 INFO [AsyncFSWAL-0-hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData-prefix:db9c3a6c6492,36861,1732130702494 {}] wal.AbstractFSWAL(1183): Slow sync cost: 151 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46289,DS-670dd707-aa81-4325-a709-18eec342bc77,DISK]] 2024-11-20T19:26:17,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T19:26:17,087 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:17,088 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:17,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:17,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/f2d2b20c38534d2180b8356c36bd8cb5 2024-11-20T19:26:17,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/f5366a0bed634bbe8e0793e4a97fddeb is 50, key is 
test_row_0/B:col10/1732130776053/Put/seqid=0 2024-11-20T19:26:17,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T19:26:17,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742113_1289 (size=12151) 2024-11-20T19:26:17,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:17,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:17,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130837321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130837324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130837324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130837325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130837329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T19:26:17,397 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:17,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,550 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:17,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:17,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:17,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/f5366a0bed634bbe8e0793e4a97fddeb 2024-11-20T19:26:17,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/1a63b25e568f4256954fc6a73ebb4dc4 is 50, key is test_row_0/C:col10/1732130776053/Put/seqid=0 2024-11-20T19:26:17,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742114_1290 (size=12151) 2024-11-20T19:26:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T19:26:17,704 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:17,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:17,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:17,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130837845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130837845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130837845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130837845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130837845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:17,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:17,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:17,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:17,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
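The RegionTooBusyException warnings come from HRegion.checkResources rejecting new writes once the region's memstore passes its blocking limit, reported here as 512.0 K. That limit is the configured memstore flush size multiplied by the block multiplier; TestAcidGuarantees runs with deliberately tiny values so that writers outrun the flusher and hit the limit. The snippet below shows the two configuration keys involved; the concrete numbers are an assumption chosen so their product matches the 512 K limit in this log, not values read from the test source. Clients normally retry RegionTooBusyException, which is why the writer threads recover once the flush drains the memstore.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
        public static Configuration testSizedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a memstore once it reaches 128 KB (assumed, test-sized value).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Reject writes with RegionTooBusyException once the memstore reaches
            // flush.size * multiplier = 512 KB, the limit reported in the log above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }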
2024-11-20T19:26:17,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:17,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,009 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:18,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:18,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:18,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:18,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:18,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:18,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/1a63b25e568f4256954fc6a73ebb4dc4 2024-11-20T19:26:18,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/f2d2b20c38534d2180b8356c36bd8cb5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/f2d2b20c38534d2180b8356c36bd8cb5 2024-11-20T19:26:18,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/f2d2b20c38534d2180b8356c36bd8cb5, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T19:26:18,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/f5366a0bed634bbe8e0793e4a97fddeb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f5366a0bed634bbe8e0793e4a97fddeb 2024-11-20T19:26:18,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f5366a0bed634bbe8e0793e4a97fddeb, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T19:26:18,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/1a63b25e568f4256954fc6a73ebb4dc4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1a63b25e568f4256954fc6a73ebb4dc4 2024-11-20T19:26:18,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1a63b25e568f4256954fc6a73ebb4dc4, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T19:26:18,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 05d8fd611fc9337dfa63e932920aeaaa in 1416ms, sequenceid=169, compaction requested=true 2024-11-20T19:26:18,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:18,092 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:18,093 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files 
of size 36611 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:18,093 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:18,093 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:18,093 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/e748ea77912e410f8bd97934806d6d18, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a07c26eaeaa04655bb862778399582c0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/f2d2b20c38534d2180b8356c36bd8cb5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.8 K 2024-11-20T19:26:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:18,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e748ea77912e410f8bd97934806d6d18, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732130775703 2024-11-20T19:26:18,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a07c26eaeaa04655bb862778399582c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732130775729 2024-11-20T19:26:18,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2d2b20c38534d2180b8356c36bd8cb5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130776048 2024-11-20T19:26:18,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,096 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:18,097 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36611 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:18,098 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:18,098 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
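"Exploring compaction algorithm has selected 3 files of size 36611 ... with 1 in ratio" refers to the ratio test applied to each candidate set: a set qualifies only if every file is no larger than the compaction ratio (hbase.hstore.compaction.ratio, commonly 1.2) times the combined size of the other files in the set. The method below is a simplified, stand-alone illustration of that check, not the actual HBase implementation; the individual file sizes are illustrative values chosen to sum to the 36611 bytes reported above.

    /**
     * Simplified sketch of the "files in ratio" test used when picking a
     * minor-compaction candidate set: every file must be no larger than
     * ratio * (sum of the other files' sizes). Not the actual HBase code.
     */
    public final class RatioCheck {
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Illustrative sizes summing to the 36611 bytes reported above.
            long[] sizes = {12288, 12151, 12172};
            System.out.println(filesInRatio(sizes, 1.2)); // true: set is eligible
        }
    }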
2024-11-20T19:26:18,098 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d83471f4418c4053b1d7ff3aa7d13cbb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9c47e0fad07942af82643097320bcc46, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f5366a0bed634bbe8e0793e4a97fddeb] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.8 K 2024-11-20T19:26:18,098 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d83471f4418c4053b1d7ff3aa7d13cbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732130775703 2024-11-20T19:26:18,099 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c47e0fad07942af82643097320bcc46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732130775729 2024-11-20T19:26:18,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:18,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:18,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:18,100 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f5366a0bed634bbe8e0793e4a97fddeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130776048 2024-11-20T19:26:18,109 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#237 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:18,110 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d137baf231644f3194b4f4e229e32540 is 50, key is test_row_0/A:col10/1732130776053/Put/seqid=0 2024-11-20T19:26:18,116 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#238 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:18,117 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/6fe06271bd854a6c9b8b959c8d36223f is 50, key is test_row_0/B:col10/1732130776053/Put/seqid=0 2024-11-20T19:26:18,162 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-20T19:26:18,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:18,163 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:26:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:18,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742115_1291 (size=12561) 2024-11-20T19:26:18,175 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d137baf231644f3194b4f4e229e32540 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d137baf231644f3194b4f4e229e32540 2024-11-20T19:26:18,185 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into d137baf231644f3194b4f4e229e32540(size=12.3 K), 
total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:18,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:18,185 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130778092; duration=0sec 2024-11-20T19:26:18,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:18,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:18,186 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:18,188 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36611 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:18,188 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:18,188 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
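The throughput figures above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") are printed by the PressureAwareThroughputController, which caps compaction I/O between a lower and an upper bound depending on flush pressure. The keys below are the standard throttle settings; the byte values are assumed defaults consistent with the 50 MB/s limit seen here, not values confirmed from this cluster's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThrottle {
        public static Configuration throttledConf() {
            Configuration conf = HBaseConfiguration.create();
            // Throughput cap when there is little flush pressure; 50 MB/s matches
            // the "total limit is 50.00 MB/second" line above (assumed default).
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            // Cap used as pressure rises (assumed default of 100 MB/s).
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            return conf;
        }
    }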
2024-11-20T19:26:18,189 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7e8c5abd725146c18fc1b48e6d492388, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8a2c92b3e91f4fba9f6e8906ebebee3c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1a63b25e568f4256954fc6a73ebb4dc4] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=35.8 K 2024-11-20T19:26:18,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742116_1292 (size=12561) 2024-11-20T19:26:18,190 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e8c5abd725146c18fc1b48e6d492388, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732130775703 2024-11-20T19:26:18,190 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a2c92b3e91f4fba9f6e8906ebebee3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732130775729 2024-11-20T19:26:18,191 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a63b25e568f4256954fc6a73ebb4dc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130776048 2024-11-20T19:26:18,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T19:26:18,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/99eed924d1244a78b869e6b520694db4 is 50, key is test_row_0/A:col10/1732130776707/Put/seqid=0 2024-11-20T19:26:18,200 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/6fe06271bd854a6c9b8b959c8d36223f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6fe06271bd854a6c9b8b959c8d36223f 2024-11-20T19:26:18,208 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 6fe06271bd854a6c9b8b959c8d36223f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:18,208 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:18,208 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130778096; duration=0sec 2024-11-20T19:26:18,208 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,208 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742117_1293 (size=12151) 2024-11-20T19:26:18,221 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/99eed924d1244a78b869e6b520694db4 2024-11-20T19:26:18,229 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:18,231 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d315dc808b8f4eb69077070d9418a21e is 50, key is test_row_0/C:col10/1732130776053/Put/seqid=0 2024-11-20T19:26:18,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e961359b25f54a3492e821cb7eb33bbe is 50, key is test_row_0/B:col10/1732130776707/Put/seqid=0 2024-11-20T19:26:18,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742118_1294 (size=12561) 2024-11-20T19:26:18,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742119_1295 (size=12151) 2024-11-20T19:26:18,310 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d315dc808b8f4eb69077070d9418a21e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d315dc808b8f4eb69077070d9418a21e 2024-11-20T19:26:18,312 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e961359b25f54a3492e821cb7eb33bbe 2024-11-20T19:26:18,324 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into d315dc808b8f4eb69077070d9418a21e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:18,324 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:18,324 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130778099; duration=0sec 2024-11-20T19:26:18,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:18,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:18,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/5621df6a528f40baa9b3ffca33ff6cf7 is 50, key is test_row_0/C:col10/1732130776707/Put/seqid=0 2024-11-20T19:26:18,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742120_1296 (size=12151) 2024-11-20T19:26:18,765 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=191 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/5621df6a528f40baa9b3ffca33ff6cf7 2024-11-20T19:26:18,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/99eed924d1244a78b869e6b520694db4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/99eed924d1244a78b869e6b520694db4 2024-11-20T19:26:18,783 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/99eed924d1244a78b869e6b520694db4, entries=150, sequenceid=191, 
filesize=11.9 K 2024-11-20T19:26:18,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e961359b25f54a3492e821cb7eb33bbe as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e961359b25f54a3492e821cb7eb33bbe 2024-11-20T19:26:18,801 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e961359b25f54a3492e821cb7eb33bbe, entries=150, sequenceid=191, filesize=11.9 K 2024-11-20T19:26:18,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/5621df6a528f40baa9b3ffca33ff6cf7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/5621df6a528f40baa9b3ffca33ff6cf7 2024-11-20T19:26:18,818 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/5621df6a528f40baa9b3ffca33ff6cf7, entries=150, sequenceid=191, filesize=11.9 K 2024-11-20T19:26:18,820 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 05d8fd611fc9337dfa63e932920aeaaa in 657ms, sequenceid=191, compaction requested=false 2024-11-20T19:26:18,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:18,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
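At this point the flush has committed one new HFile into each of the A, B and C stores at sequenceid=191 and released the memstore snapshots. The keys visible throughout the log ("test_row_0/A:col10", ".../B:col10", ".../C:col10") reflect the TestAcidGuarantees write pattern: each writer updates the same row across all three column families in a single Put, relying on HBase's row-level atomicity so readers never observe a mix of old and new values. The sketch below reconstructs that style of write with the public client API; the qualifier and value layout are assumptions based only on the keys in this log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicRowWrite {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                byte[] row = Bytes.toBytes("test_row_0");
                byte[] value = Bytes.toBytes("v-42"); // illustrative value
                Put put = new Put(row);
                // One Put spanning all three families is applied atomically within the row,
                // so a concurrent read sees either all of these cells updated or none of them.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put);
            }
        }
    }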
2024-11-20T19:26:18,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-20T19:26:18,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-20T19:26:18,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-20T19:26:18,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7330 sec 2024-11-20T19:26:18,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.8920 sec 2024-11-20T19:26:18,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:18,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:18,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:18,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d15bf91b62c741729fd6c88bb714fd81 is 50, key is test_row_0/A:col10/1732130778853/Put/seqid=0 2024-11-20T19:26:18,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130838882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130838882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130838884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130838885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130838888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742121_1297 (size=9757) 2024-11-20T19:26:18,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130838988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130838988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130838989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130838990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:18,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130838990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-20T19:26:19,192 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-20T19:26:19,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:19,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-20T19:26:19,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:19,196 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:19,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130839192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130839192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,197 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:19,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:19,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130839193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130839194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130839193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d15bf91b62c741729fd6c88bb714fd81 2024-11-20T19:26:19,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:19,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/7053c3b16d4d47478b421dfce215085b is 50, key is test_row_0/B:col10/1732130778853/Put/seqid=0 2024-11-20T19:26:19,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:19,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:19,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742122_1298 (size=9757) 2024-11-20T19:26:19,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/7053c3b16d4d47478b421dfce215085b 2024-11-20T19:26:19,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/f74e137b8f3040c98accfd3551cc85e4 is 50, key is test_row_0/C:col10/1732130778853/Put/seqid=0 2024-11-20T19:26:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742123_1299 (size=9757) 2024-11-20T19:26:19,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:19,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130839497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,502 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:19,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,502 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130839499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130839500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130839500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:19,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130839501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,661 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:19,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:19,814 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:19,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:19,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/f74e137b8f3040c98accfd3551cc85e4 2024-11-20T19:26:19,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d15bf91b62c741729fd6c88bb714fd81 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d15bf91b62c741729fd6c88bb714fd81 2024-11-20T19:26:19,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d15bf91b62c741729fd6c88bb714fd81, entries=100, sequenceid=205, filesize=9.5 K 2024-11-20T19:26:19,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/7053c3b16d4d47478b421dfce215085b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7053c3b16d4d47478b421dfce215085b 2024-11-20T19:26:19,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7053c3b16d4d47478b421dfce215085b, entries=100, 
sequenceid=205, filesize=9.5 K 2024-11-20T19:26:19,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/f74e137b8f3040c98accfd3551cc85e4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/f74e137b8f3040c98accfd3551cc85e4 2024-11-20T19:26:19,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/f74e137b8f3040c98accfd3551cc85e4, entries=100, sequenceid=205, filesize=9.5 K 2024-11-20T19:26:19,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 05d8fd611fc9337dfa63e932920aeaaa in 1007ms, sequenceid=205, compaction requested=true 2024-11-20T19:26:19,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:19,864 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:19,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:19,866 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:19,866 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:19,866 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:19,866 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d137baf231644f3194b4f4e229e32540, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/99eed924d1244a78b869e6b520694db4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d15bf91b62c741729fd6c88bb714fd81] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=33.7 K 2024-11-20T19:26:19,866 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d137baf231644f3194b4f4e229e32540, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130776048 2024-11-20T19:26:19,867 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99eed924d1244a78b869e6b520694db4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1732130776703 2024-11-20T19:26:19,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:19,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:19,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:19,867 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:19,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:19,867 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:19,867 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d15bf91b62c741729fd6c88bb714fd81, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732130778853 2024-11-20T19:26:19,868 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:19,868 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:19,868 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in 
TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:19,868 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6fe06271bd854a6c9b8b959c8d36223f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e961359b25f54a3492e821cb7eb33bbe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7053c3b16d4d47478b421dfce215085b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=33.7 K 2024-11-20T19:26:19,869 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe06271bd854a6c9b8b959c8d36223f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130776048 2024-11-20T19:26:19,869 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e961359b25f54a3492e821cb7eb33bbe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1732130776703 2024-11-20T19:26:19,869 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7053c3b16d4d47478b421dfce215085b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732130778853 2024-11-20T19:26:19,877 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:19,878 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e431074230474a258c928728de0d76ab is 50, key is test_row_0/B:col10/1732130778853/Put/seqid=0 2024-11-20T19:26:19,881 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#247 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:19,881 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/7977c0a5d0564a42abf9e4dfdbce39be is 50, key is test_row_0/A:col10/1732130778853/Put/seqid=0 2024-11-20T19:26:19,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742124_1300 (size=12663) 2024-11-20T19:26:19,935 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e431074230474a258c928728de0d76ab as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e431074230474a258c928728de0d76ab 2024-11-20T19:26:19,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742125_1301 (size=12663) 2024-11-20T19:26:19,942 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into e431074230474a258c928728de0d76ab(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:19,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:19,942 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130779867; duration=0sec 2024-11-20T19:26:19,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:19,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:19,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:19,945 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:19,945 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:19,945 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:19,945 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d315dc808b8f4eb69077070d9418a21e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/5621df6a528f40baa9b3ffca33ff6cf7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/f74e137b8f3040c98accfd3551cc85e4] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=33.7 K 2024-11-20T19:26:19,952 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d315dc808b8f4eb69077070d9418a21e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130776048 2024-11-20T19:26:19,953 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5621df6a528f40baa9b3ffca33ff6cf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=191, earliestPutTs=1732130776703 2024-11-20T19:26:19,955 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f74e137b8f3040c98accfd3551cc85e4, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732130778853 2024-11-20T19:26:19,957 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/7977c0a5d0564a42abf9e4dfdbce39be as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7977c0a5d0564a42abf9e4dfdbce39be 2024-11-20T19:26:19,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:19,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:19,967 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:19,966 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into 7977c0a5d0564a42abf9e4dfdbce39be(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:19,968 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:19,968 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130779864; duration=0sec 2024-11-20T19:26:19,969 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:19,969 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:19,977 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:19,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/0eb3d831fa764533bec44e8330c1ae89 is 50, key is test_row_0/A:col10/1732130778885/Put/seqid=0 2024-11-20T19:26:19,978 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/9f35914114464065a5b285859d3a91d6 is 50, key is test_row_0/C:col10/1732130778853/Put/seqid=0 2024-11-20T19:26:19,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742126_1302 (size=12151) 2024-11-20T19:26:20,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:20,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:20,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742127_1303 (size=12663) 2024-11-20T19:26:20,013 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/0eb3d831fa764533bec44e8330c1ae89 2024-11-20T19:26:20,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130840014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8599d282c5964fe2b8c0ef3bcb430c1a is 50, key is test_row_0/B:col10/1732130778885/Put/seqid=0 2024-11-20T19:26:20,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130840017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130840018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130840017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130840018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742128_1304 (size=12151) 2024-11-20T19:26:20,027 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8599d282c5964fe2b8c0ef3bcb430c1a 2024-11-20T19:26:20,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d359adfeb1ef4ea49a600b3bf06c0269 is 50, key is test_row_0/C:col10/1732130778885/Put/seqid=0 2024-11-20T19:26:20,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742129_1305 (size=12151) 2024-11-20T19:26:20,046 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=232 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d359adfeb1ef4ea49a600b3bf06c0269 2024-11-20T19:26:20,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/0eb3d831fa764533bec44e8330c1ae89 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0eb3d831fa764533bec44e8330c1ae89 2024-11-20T19:26:20,056 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0eb3d831fa764533bec44e8330c1ae89, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T19:26:20,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8599d282c5964fe2b8c0ef3bcb430c1a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8599d282c5964fe2b8c0ef3bcb430c1a 2024-11-20T19:26:20,064 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8599d282c5964fe2b8c0ef3bcb430c1a, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T19:26:20,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/d359adfeb1ef4ea49a600b3bf06c0269 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d359adfeb1ef4ea49a600b3bf06c0269 2024-11-20T19:26:20,071 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d359adfeb1ef4ea49a600b3bf06c0269, entries=150, sequenceid=232, filesize=11.9 K 2024-11-20T19:26:20,072 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 05d8fd611fc9337dfa63e932920aeaaa in 105ms, sequenceid=232, compaction requested=false 2024-11-20T19:26:20,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 
05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:20,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-20T19:26:20,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-20T19:26:20,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-20T19:26:20,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 876 msec 2024-11-20T19:26:20,076 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 883 msec 2024-11-20T19:26:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:20,125 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:20,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/ea45af8bb33e4942ab7674d4c36492b8 is 50, key is test_row_0/A:col10/1732130780017/Put/seqid=0 2024-11-20T19:26:20,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130840151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130840152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130840159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130840159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130840160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742130_1306 (size=12151) 2024-11-20T19:26:20,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/ea45af8bb33e4942ab7674d4c36492b8 2024-11-20T19:26:20,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a98075c22d8644018b804a2bb85ddfb5 is 50, key is test_row_0/B:col10/1732130780017/Put/seqid=0 2024-11-20T19:26:20,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742131_1307 (size=12151) 2024-11-20T19:26:20,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130840260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130840260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130840265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130840266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130840266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-20T19:26:20,299 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-20T19:26:20,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:20,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-20T19:26:20,302 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:20,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:20,302 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:20,303 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:20,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=73 2024-11-20T19:26:20,422 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/9f35914114464065a5b285859d3a91d6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/9f35914114464065a5b285859d3a91d6 2024-11-20T19:26:20,427 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 9f35914114464065a5b285859d3a91d6(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:20,427 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:20,427 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130779867; duration=0sec 2024-11-20T19:26:20,428 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:20,428 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:20,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:20,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:20,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130840464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130840465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130840470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130840470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,470 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130840470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:20,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a98075c22d8644018b804a2bb85ddfb5 2024-11-20T19:26:20,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/de1d30685c014e76ad83a9cdbfe3287f is 50, key is test_row_0/C:col10/1732130780017/Put/seqid=0 2024-11-20T19:26:20,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742132_1308 (size=12151) 2024-11-20T19:26:20,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/de1d30685c014e76ad83a9cdbfe3287f 2024-11-20T19:26:20,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/ea45af8bb33e4942ab7674d4c36492b8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ea45af8bb33e4942ab7674d4c36492b8 2024-11-20T19:26:20,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ea45af8bb33e4942ab7674d4c36492b8, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T19:26:20,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a98075c22d8644018b804a2bb85ddfb5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a98075c22d8644018b804a2bb85ddfb5 2024-11-20T19:26:20,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a98075c22d8644018b804a2bb85ddfb5, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T19:26:20,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:20,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/de1d30685c014e76ad83a9cdbfe3287f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/de1d30685c014e76ad83a9cdbfe3287f 2024-11-20T19:26:20,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:20,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/de1d30685c014e76ad83a9cdbfe3287f, entries=150, sequenceid=247, filesize=11.9 K 2024-11-20T19:26:20,768 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 05d8fd611fc9337dfa63e932920aeaaa in 642ms, sequenceid=247, compaction requested=true 2024-11-20T19:26:20,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:20,768 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:20,768 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:20,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:20,769 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:20,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:20,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:20,769 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:20,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:20,770 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:20,770 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in 
TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,770 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7977c0a5d0564a42abf9e4dfdbce39be, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0eb3d831fa764533bec44e8330c1ae89, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ea45af8bb33e4942ab7674d4c36492b8] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=36.1 K 2024-11-20T19:26:20,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:20,770 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:20,770 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7977c0a5d0564a42abf9e4dfdbce39be, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732130776703 2024-11-20T19:26:20,771 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:20,771 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:20,771 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e431074230474a258c928728de0d76ab, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8599d282c5964fe2b8c0ef3bcb430c1a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a98075c22d8644018b804a2bb85ddfb5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=36.1 K 2024-11-20T19:26:20,771 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0eb3d831fa764533bec44e8330c1ae89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732130778880 2024-11-20T19:26:20,771 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e431074230474a258c928728de0d76ab, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732130776703 2024-11-20T19:26:20,771 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea45af8bb33e4942ab7674d4c36492b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732130780017 2024-11-20T19:26:20,772 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8599d282c5964fe2b8c0ef3bcb430c1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732130778880 2024-11-20T19:26:20,773 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a98075c22d8644018b804a2bb85ddfb5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732130780017 2024-11-20T19:26:20,773 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:26:20,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:20,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:20,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:20,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:20,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:20,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is 
too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130840786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130840786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130840787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130840787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130840788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/b18223ef8c1d4d3c9dbe5596f9ab457e is 50, key is test_row_0/A:col10/1732130780151/Put/seqid=0 2024-11-20T19:26:20,813 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:20,813 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c61809783be9453cb91b339ba42bc63b is 50, key is test_row_0/A:col10/1732130780017/Put/seqid=0 2024-11-20T19:26:20,832 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:20,832 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/04a8321584b84536a8e718e3514ad7a9 is 50, key is test_row_0/B:col10/1732130780017/Put/seqid=0 2024-11-20T19:26:20,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742133_1309 (size=14741) 2024-11-20T19:26:20,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742134_1310 (size=12765) 2024-11-20T19:26:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130840893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130840893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130840893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130840894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:20,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130840896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:20,912 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:20,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742135_1311 (size=12765) 2024-11-20T19:26:20,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:20,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:20,921 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/04a8321584b84536a8e718e3514ad7a9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/04a8321584b84536a8e718e3514ad7a9 2024-11-20T19:26:20,928 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 04a8321584b84536a8e718e3514ad7a9(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:20,928 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:20,928 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130780769; duration=0sec 2024-11-20T19:26:20,928 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:20,928 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:20,928 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:20,929 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:20,929 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:20,930 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:20,930 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/9f35914114464065a5b285859d3a91d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d359adfeb1ef4ea49a600b3bf06c0269, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/de1d30685c014e76ad83a9cdbfe3287f] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=36.1 K 2024-11-20T19:26:20,930 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f35914114464065a5b285859d3a91d6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1732130776703 2024-11-20T19:26:20,930 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d359adfeb1ef4ea49a600b3bf06c0269, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732130778880 2024-11-20T19:26:20,931 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting de1d30685c014e76ad83a9cdbfe3287f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732130780017 2024-11-20T19:26:20,950 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:20,950 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/2fe20ae141974fd08af8e2ea8658bf1a is 50, key is test_row_0/C:col10/1732130780017/Put/seqid=0 2024-11-20T19:26:21,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742136_1312 (size=12765) 2024-11-20T19:26:21,027 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/2fe20ae141974fd08af8e2ea8658bf1a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2fe20ae141974fd08af8e2ea8658bf1a 2024-11-20T19:26:21,044 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 2fe20ae141974fd08af8e2ea8658bf1a(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:21,044 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:21,044 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130780769; duration=0sec 2024-11-20T19:26:21,044 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:21,044 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:21,066 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:21,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:21,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:21,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:21,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:21,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130841097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130841097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130841097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130841098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130841102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,219 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:21,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:21,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:21,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/b18223ef8c1d4d3c9dbe5596f9ab457e 2024-11-20T19:26:21,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b1acc767926c4202a9f089b0c60a91a7 is 50, key is test_row_0/B:col10/1732130780151/Put/seqid=0 2024-11-20T19:26:21,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742137_1313 (size=12301) 2024-11-20T19:26:21,302 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c61809783be9453cb91b339ba42bc63b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c61809783be9453cb91b339ba42bc63b 2024-11-20T19:26:21,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b1acc767926c4202a9f089b0c60a91a7 2024-11-20T19:26:21,309 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into c61809783be9453cb91b339ba42bc63b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:21,309 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:21,309 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130780768; duration=0sec 2024-11-20T19:26:21,309 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:21,309 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:21,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/ff0bb47871324e71b0e33266dfaf947e is 50, key is test_row_0/C:col10/1732130780151/Put/seqid=0 2024-11-20T19:26:21,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742138_1314 (size=12301) 2024-11-20T19:26:21,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/ff0bb47871324e71b0e33266dfaf947e 2024-11-20T19:26:21,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/b18223ef8c1d4d3c9dbe5596f9ab457e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/b18223ef8c1d4d3c9dbe5596f9ab457e 2024-11-20T19:26:21,358 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/b18223ef8c1d4d3c9dbe5596f9ab457e, entries=200, sequenceid=271, filesize=14.4 K 2024-11-20T19:26:21,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b1acc767926c4202a9f089b0c60a91a7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b1acc767926c4202a9f089b0c60a91a7 2024-11-20T19:26:21,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b1acc767926c4202a9f089b0c60a91a7, entries=150, sequenceid=271, filesize=12.0 K 2024-11-20T19:26:21,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/ff0bb47871324e71b0e33266dfaf947e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ff0bb47871324e71b0e33266dfaf947e 2024-11-20T19:26:21,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ff0bb47871324e71b0e33266dfaf947e, entries=150, sequenceid=271, filesize=12.0 K 2024-11-20T19:26:21,371 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-20T19:26:21,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 05d8fd611fc9337dfa63e932920aeaaa in 599ms, sequenceid=271, compaction requested=false 2024-11-20T19:26:21,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:21,372 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-20T19:26:21,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:21,373 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:21,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:21,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:21,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:21,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/87d8166141104f26a753936f56ff5ab3 is 50, key is test_row_0/A:col10/1732130780778/Put/seqid=0 2024-11-20T19:26:21,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:21,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:21,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:21,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742139_1315 (size=12301) 2024-11-20T19:26:21,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130841424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130841425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130841425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130841426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130841427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130841530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130841530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130841530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130841530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130841532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130841733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130841734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130841734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130841734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130841735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:21,824 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/87d8166141104f26a753936f56ff5ab3 2024-11-20T19:26:21,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/3905e77381a14805b2d02fbf29ae976d is 50, key is test_row_0/B:col10/1732130780778/Put/seqid=0 2024-11-20T19:26:21,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742140_1316 (size=12301) 2024-11-20T19:26:21,877 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/3905e77381a14805b2d02fbf29ae976d 2024-11-20T19:26:21,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/1d5734ed25f84e01aa8612a39109ff76 is 50, key is test_row_0/C:col10/1732130780778/Put/seqid=0 2024-11-20T19:26:21,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742141_1317 (size=12301) 2024-11-20T19:26:21,916 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/1d5734ed25f84e01aa8612a39109ff76 2024-11-20T19:26:21,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/87d8166141104f26a753936f56ff5ab3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/87d8166141104f26a753936f56ff5ab3 2024-11-20T19:26:21,928 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/87d8166141104f26a753936f56ff5ab3, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T19:26:21,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/3905e77381a14805b2d02fbf29ae976d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/3905e77381a14805b2d02fbf29ae976d 2024-11-20T19:26:21,963 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/3905e77381a14805b2d02fbf29ae976d, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T19:26:21,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/1d5734ed25f84e01aa8612a39109ff76 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1d5734ed25f84e01aa8612a39109ff76 2024-11-20T19:26:21,970 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1d5734ed25f84e01aa8612a39109ff76, entries=150, sequenceid=287, filesize=12.0 K 2024-11-20T19:26:21,972 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 05d8fd611fc9337dfa63e932920aeaaa in 599ms, sequenceid=287, compaction requested=true 2024-11-20T19:26:21,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:21,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
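Context for the entries above: the repeated RegionTooBusyException warnings are region 05d8fd611fc9337dfa63e932920aeaaa refusing new Mutate calls while its memstore sits over the 512.0 K blocking limit, and they stop once the flush commits the A/B/C files. That limit is normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the 512 K figure suggests this test runs with a deliberately tiny flush size (an inference, not something stated in the log). A minimal client-side sketch of a write that backs off and retries on that exception follows; the row and column names mirror the test rows seen above, the retry count and sleep values are invented, and with default client settings the failure may instead surface wrapped in a RetriesExhaustedException after the client's own internal retries.

// Illustrative only: a client-side view of the writes being rejected above with
// RegionTooBusyException ("Over memstore limit=512.0 K"). Retry/backoff numbers are invented.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put);   // may be rejected while the region's memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // Depending on hbase.client.retries.number, this may arrive wrapped in a
          // RetriesExhaustedException instead; catching it directly is a simplification.
          if (++attempts > 5) throw e;     // give up after a few tries
          Thread.sleep(100L * attempts);   // simple linear backoff between retries
        }
      }
    }
  }
}

Backing off rather than hammering the region gives the in-flight flush time to drain the memstore, which is exactly what happens in the log once sequenceid=287 is written out.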
2024-11-20T19:26:21,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-20T19:26:21,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-20T19:26:21,975 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-20T19:26:21,976 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6710 sec 2024-11-20T19:26:21,978 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.6770 sec 2024-11-20T19:26:22,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:22,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:26:22,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:22,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:22,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:22,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,052 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/539b910e9832471eb3bfed67e7e4fb98 is 50, key is test_row_0/A:col10/1732130782041/Put/seqid=0 2024-11-20T19:26:22,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742142_1318 (size=14741) 2024-11-20T19:26:22,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/539b910e9832471eb3bfed67e7e4fb98 2024-11-20T19:26:22,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/895f8bca252e4879a42c833609c958bb is 50, key is test_row_0/B:col10/1732130782041/Put/seqid=0 2024-11-20T19:26:22,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742143_1319 (size=12301) 2024-11-20T19:26:22,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/895f8bca252e4879a42c833609c958bb 2024-11-20T19:26:22,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/234f45cbd4694bfca651233b8c7ca553 is 50, key is test_row_0/C:col10/1732130782041/Put/seqid=0 2024-11-20T19:26:22,130 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742144_1320 (size=12301) 2024-11-20T19:26:22,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/234f45cbd4694bfca651233b8c7ca553 2024-11-20T19:26:22,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/539b910e9832471eb3bfed67e7e4fb98 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/539b910e9832471eb3bfed67e7e4fb98 2024-11-20T19:26:22,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/539b910e9832471eb3bfed67e7e4fb98, entries=200, sequenceid=312, filesize=14.4 K 2024-11-20T19:26:22,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/895f8bca252e4879a42c833609c958bb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/895f8bca252e4879a42c833609c958bb 2024-11-20T19:26:22,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,164 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/895f8bca252e4879a42c833609c958bb, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T19:26:22,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/234f45cbd4694bfca651233b8c7ca553 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/234f45cbd4694bfca651233b8c7ca553 2024-11-20T19:26:22,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/234f45cbd4694bfca651233b8c7ca553, entries=150, sequenceid=312, filesize=12.0 K 2024-11-20T19:26:22,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 05d8fd611fc9337dfa63e932920aeaaa in 143ms, sequenceid=312, compaction requested=true 2024-11-20T19:26:22,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:22,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:22,185 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:22,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:22,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,185 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:22,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:22,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:22,186 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54548 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:22,186 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:22,186 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:22,187 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c61809783be9453cb91b339ba42bc63b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/b18223ef8c1d4d3c9dbe5596f9ab457e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/87d8166141104f26a753936f56ff5ab3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/539b910e9832471eb3bfed67e7e4fb98] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=53.3 K 2024-11-20T19:26:22,187 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c61809783be9453cb91b339ba42bc63b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732130780017 2024-11-20T19:26:22,187 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:22,187 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:22,187 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:22,187 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/04a8321584b84536a8e718e3514ad7a9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b1acc767926c4202a9f089b0c60a91a7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/3905e77381a14805b2d02fbf29ae976d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/895f8bca252e4879a42c833609c958bb] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=48.5 K 2024-11-20T19:26:22,188 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 04a8321584b84536a8e718e3514ad7a9, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732130780017 2024-11-20T19:26:22,188 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b18223ef8c1d4d3c9dbe5596f9ab457e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732130780151 2024-11-20T19:26:22,188 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b1acc767926c4202a9f089b0c60a91a7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732130780151 2024-11-20T19:26:22,188 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87d8166141104f26a753936f56ff5ab3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732130780778 2024-11-20T19:26:22,189 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 3905e77381a14805b2d02fbf29ae976d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732130780778 2024-11-20T19:26:22,189 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 539b910e9832471eb3bfed67e7e4fb98, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732130781424 2024-11-20T19:26:22,189 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 895f8bca252e4879a42c833609c958bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732130781424 2024-11-20T19:26:22,203 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#267 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:22,204 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/28d4499a310146c4b7ce7a6a5eea857c is 50, key is test_row_0/A:col10/1732130782041/Put/seqid=0 2024-11-20T19:26:22,205 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#268 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:22,205 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/0c3d7035a3544413a139ba7fc69a7ad9 is 50, key is test_row_0/B:col10/1732130782041/Put/seqid=0 2024-11-20T19:26:22,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742145_1321 (size=13051) 2024-11-20T19:26:22,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742146_1322 (size=13051) 2024-11-20T19:26:22,263 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/28d4499a310146c4b7ce7a6a5eea857c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/28d4499a310146c4b7ce7a6a5eea857c 2024-11-20T19:26:22,270 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into 28d4499a310146c4b7ce7a6a5eea857c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:22,270 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:22,270 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=12, startTime=1732130782185; duration=0sec 2024-11-20T19:26:22,270 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:22,270 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:22,271 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:22,272 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:22,272 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:22,272 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:22,272 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2fe20ae141974fd08af8e2ea8658bf1a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ff0bb47871324e71b0e33266dfaf947e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1d5734ed25f84e01aa8612a39109ff76, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/234f45cbd4694bfca651233b8c7ca553] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=48.5 K 2024-11-20T19:26:22,273 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fe20ae141974fd08af8e2ea8658bf1a, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732130780017 2024-11-20T19:26:22,273 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff0bb47871324e71b0e33266dfaf947e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732130780151 2024-11-20T19:26:22,273 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d5734ed25f84e01aa8612a39109ff76, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1732130780778 2024-11-20T19:26:22,274 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 234f45cbd4694bfca651233b8c7ca553, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732130781424 2024-11-20T19:26:22,289 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#269 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:22,289 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/29aa7a1ad6984937b50153a349c50f74 is 50, key is test_row_0/C:col10/1732130782041/Put/seqid=0 2024-11-20T19:26:22,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742147_1323 (size=13051) 2024-11-20T19:26:22,338 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/29aa7a1ad6984937b50153a349c50f74 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/29aa7a1ad6984937b50153a349c50f74 2024-11-20T19:26:22,344 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 29aa7a1ad6984937b50153a349c50f74(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:22,344 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:22,344 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=12, startTime=1732130782185; duration=0sec 2024-11-20T19:26:22,344 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,344 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:22,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:22,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:22,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c12c3d01d27f407bbfbdff8a22884a68 is 50, key is test_row_0/A:col10/1732130782045/Put/seqid=0 2024-11-20T19:26:22,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-20T19:26:22,406 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-20T19:26:22,407 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:22,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-20T19:26:22,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:22,410 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:22,410 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:22,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:22,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to 
blk_1073742148_1324 (size=12301) 2024-11-20T19:26:22,413 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c12c3d01d27f407bbfbdff8a22884a68 2024-11-20T19:26:22,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/dd36d00f07274268a2935dd48f5781f3 is 50, key is test_row_0/B:col10/1732130782045/Put/seqid=0 2024-11-20T19:26:22,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742149_1325 (size=12301) 2024-11-20T19:26:22,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/dd36d00f07274268a2935dd48f5781f3 2024-11-20T19:26:22,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/7b22a3ddf8134ea9954af8155ac58047 is 50, key is test_row_0/C:col10/1732130782045/Put/seqid=0 2024-11-20T19:26:22,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:22,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742150_1326 (size=12301) 2024-11-20T19:26:22,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/7b22a3ddf8134ea9954af8155ac58047 2024-11-20T19:26:22,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/c12c3d01d27f407bbfbdff8a22884a68 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c12c3d01d27f407bbfbdff8a22884a68 2024-11-20T19:26:22,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c12c3d01d27f407bbfbdff8a22884a68, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T19:26:22,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/dd36d00f07274268a2935dd48f5781f3 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd36d00f07274268a2935dd48f5781f3 2024-11-20T19:26:22,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd36d00f07274268a2935dd48f5781f3, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T19:26:22,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/7b22a3ddf8134ea9954af8155ac58047 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7b22a3ddf8134ea9954af8155ac58047 2024-11-20T19:26:22,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7b22a3ddf8134ea9954af8155ac58047, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T19:26:22,555 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 05d8fd611fc9337dfa63e932920aeaaa in 193ms, sequenceid=327, compaction requested=false 2024-11-20T19:26:22,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:22,565 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,565 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:22,566 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:22,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d089ee2616ff4039aedade0e561690e0 is 50, key is test_row_0/A:col10/1732130782392/Put/seqid=0 2024-11-20T19:26:22,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742151_1327 (size=12301) 2024-11-20T19:26:22,637 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d089ee2616ff4039aedade0e561690e0 2024-11-20T19:26:22,661 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/0c3d7035a3544413a139ba7fc69a7ad9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0c3d7035a3544413a139ba7fc69a7ad9 2024-11-20T19:26:22,667 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 0c3d7035a3544413a139ba7fc69a7ad9(size=12.7 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:22,667 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:22,667 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=12, startTime=1732130782185; duration=0sec 2024-11-20T19:26:22,667 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:22,667 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:22,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/572bc72e3a0641659d13510ed68d8c51 is 50, key is test_row_0/B:col10/1732130782392/Put/seqid=0 2024-11-20T19:26:22,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:22,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:22,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:22,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742152_1328 (size=12301) 2024-11-20T19:26:22,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,727 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/572bc72e3a0641659d13510ed68d8c51 2024-11-20T19:26:22,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/33b47e5fc8a5448f81602fae7c2015dc is 50, key is test_row_0/C:col10/1732130782392/Put/seqid=0 2024-11-20T19:26:22,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742153_1329 (size=12301) 2024-11-20T19:26:22,777 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/33b47e5fc8a5448f81602fae7c2015dc 2024-11-20T19:26:22,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d089ee2616ff4039aedade0e561690e0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d089ee2616ff4039aedade0e561690e0 2024-11-20T19:26:22,791 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d089ee2616ff4039aedade0e561690e0, entries=150, sequenceid=350, filesize=12.0 K 2024-11-20T19:26:22,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/572bc72e3a0641659d13510ed68d8c51 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/572bc72e3a0641659d13510ed68d8c51 2024-11-20T19:26:22,802 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/572bc72e3a0641659d13510ed68d8c51, entries=150, sequenceid=350, filesize=12.0 K 2024-11-20T19:26:22,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/33b47e5fc8a5448f81602fae7c2015dc as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc 2024-11-20T19:26:22,809 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc, entries=150, sequenceid=350, filesize=12.0 K 2024-11-20T19:26:22,810 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 05d8fd611fc9337dfa63e932920aeaaa in 244ms, sequenceid=350, compaction requested=true 2024-11-20T19:26:22,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:22,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:22,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-20T19:26:22,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-20T19:26:22,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-20T19:26:22,812 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 401 msec 2024-11-20T19:26:22,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 406 msec 2024-11-20T19:26:22,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:22,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:26:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:22,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:22,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/85c0c021c6374bbeb60b8a6a92264ca1 is 50, key is test_row_0/A:col10/1732130782713/Put/seqid=0 2024-11-20T19:26:22,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742154_1330 (size=17181) 2024-11-20T19:26:22,843 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/85c0c021c6374bbeb60b8a6a92264ca1 2024-11-20T19:26:22,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a11a3c5611d946a7a9d3083005e84cef is 50, key is test_row_0/B:col10/1732130782713/Put/seqid=0 2024-11-20T19:26:22,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742155_1331 (size=12301) 2024-11-20T19:26:22,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130842954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130842955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130842955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130842960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:22,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:22,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130842960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-20T19:26:23,011 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-20T19:26:23,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:23,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-20T19:26:23,014 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:23,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:23,015 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:23,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:23,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130843159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130843159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130843159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,166 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130843165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130843165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a11a3c5611d946a7a9d3083005e84cef 2024-11-20T19:26:23,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/992c56511cc14b63ac3575a516cc8acf is 50, key is test_row_0/C:col10/1732130782713/Put/seqid=0 2024-11-20T19:26:23,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:23,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742156_1332 (size=12301) 2024-11-20T19:26:23,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
as already flushing 2024-11-20T19:26:23,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130843463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130843465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130843465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130843470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130843471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:23,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:23,626 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:23,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:23,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/992c56511cc14b63ac3575a516cc8acf 2024-11-20T19:26:23,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/85c0c021c6374bbeb60b8a6a92264ca1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/85c0c021c6374bbeb60b8a6a92264ca1 2024-11-20T19:26:23,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/85c0c021c6374bbeb60b8a6a92264ca1, entries=250, sequenceid=367, filesize=16.8 K 2024-11-20T19:26:23,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a11a3c5611d946a7a9d3083005e84cef as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a11a3c5611d946a7a9d3083005e84cef 2024-11-20T19:26:23,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a11a3c5611d946a7a9d3083005e84cef, entries=150, 
sequenceid=367, filesize=12.0 K 2024-11-20T19:26:23,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/992c56511cc14b63ac3575a516cc8acf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/992c56511cc14b63ac3575a516cc8acf 2024-11-20T19:26:23,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/992c56511cc14b63ac3575a516cc8acf, entries=150, sequenceid=367, filesize=12.0 K 2024-11-20T19:26:23,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 05d8fd611fc9337dfa63e932920aeaaa in 928ms, sequenceid=367, compaction requested=true 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:23,749 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:23,749 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:23,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:23,752 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:23,752 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:23,752 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 
54834 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:23,752 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:23,752 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,752 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:23,752 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0c3d7035a3544413a139ba7fc69a7ad9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd36d00f07274268a2935dd48f5781f3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/572bc72e3a0641659d13510ed68d8c51, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a11a3c5611d946a7a9d3083005e84cef] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=48.8 K 2024-11-20T19:26:23,752 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/28d4499a310146c4b7ce7a6a5eea857c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c12c3d01d27f407bbfbdff8a22884a68, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d089ee2616ff4039aedade0e561690e0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/85c0c021c6374bbeb60b8a6a92264ca1] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=53.5 K 2024-11-20T19:26:23,752 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28d4499a310146c4b7ce7a6a5eea857c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732130781424 2024-11-20T19:26:23,753 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c3d7035a3544413a139ba7fc69a7ad9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732130781424 2024-11-20T19:26:23,753 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c12c3d01d27f407bbfbdff8a22884a68, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732130782045 2024-11-20T19:26:23,753 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting dd36d00f07274268a2935dd48f5781f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732130782045 2024-11-20T19:26:23,753 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d089ee2616ff4039aedade0e561690e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732130782381 2024-11-20T19:26:23,753 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 572bc72e3a0641659d13510ed68d8c51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732130782381 2024-11-20T19:26:23,753 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85c0c021c6374bbeb60b8a6a92264ca1, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732130782710 2024-11-20T19:26:23,754 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a11a3c5611d946a7a9d3083005e84cef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732130782713 2024-11-20T19:26:23,769 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#279 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:23,770 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/a725c3aa6f974dc6893652ae4c818599 is 50, key is test_row_0/A:col10/1732130782713/Put/seqid=0 2024-11-20T19:26:23,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-20T19:26:23,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:23,780 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:26:23,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:23,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:23,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:23,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:23,781 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:23,782 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8305c5ec8b804bdfb2f1a78c5f881ac1 is 50, key is test_row_0/B:col10/1732130782713/Put/seqid=0 2024-11-20T19:26:23,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/171efe92a0ef4eaaa6e77f195931542b is 50, key is test_row_0/A:col10/1732130782852/Put/seqid=0 2024-11-20T19:26:23,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742157_1333 (size=13187) 2024-11-20T19:26:23,832 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/a725c3aa6f974dc6893652ae4c818599 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a725c3aa6f974dc6893652ae4c818599 2024-11-20T19:26:23,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742158_1334 (size=13187) 2024-11-20T19:26:23,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742159_1335 (size=12301) 2024-11-20T19:26:23,835 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/171efe92a0ef4eaaa6e77f195931542b 2024-11-20T19:26:23,841 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into a725c3aa6f974dc6893652ae4c818599(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:23,841 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:23,841 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=12, startTime=1732130783749; duration=0sec 2024-11-20T19:26:23,841 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:23,841 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:23,841 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:23,846 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8305c5ec8b804bdfb2f1a78c5f881ac1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8305c5ec8b804bdfb2f1a78c5f881ac1 2024-11-20T19:26:23,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b5cf9b53c0354df8814ec96049443efa is 50, key is test_row_0/B:col10/1732130782852/Put/seqid=0 2024-11-20T19:26:23,851 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:23,851 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:23,851 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:23,852 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/29aa7a1ad6984937b50153a349c50f74, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7b22a3ddf8134ea9954af8155ac58047, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/992c56511cc14b63ac3575a516cc8acf] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=48.8 K 2024-11-20T19:26:23,853 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 8305c5ec8b804bdfb2f1a78c5f881ac1(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:23,853 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:23,853 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=12, startTime=1732130783749; duration=0sec 2024-11-20T19:26:23,853 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:23,853 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:23,853 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29aa7a1ad6984937b50153a349c50f74, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732130781424 2024-11-20T19:26:23,854 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b22a3ddf8134ea9954af8155ac58047, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732130782045 2024-11-20T19:26:23,854 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33b47e5fc8a5448f81602fae7c2015dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732130782381 2024-11-20T19:26:23,854 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 992c56511cc14b63ac3575a516cc8acf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732130782713 2024-11-20T19:26:23,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742160_1336 (size=12301) 
2024-11-20T19:26:23,879 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b5cf9b53c0354df8814ec96049443efa 2024-11-20T19:26:23,887 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:23,887 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/8817965b906b4ce0becfd6a96bbaed1a is 50, key is test_row_0/C:col10/1732130782713/Put/seqid=0 2024-11-20T19:26:23,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6221c04b13cf484594324639bee6cfdf is 50, key is test_row_0/C:col10/1732130782852/Put/seqid=0 2024-11-20T19:26:23,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742161_1337 (size=13187) 2024-11-20T19:26:23,921 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/8817965b906b4ce0becfd6a96bbaed1a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8817965b906b4ce0becfd6a96bbaed1a 2024-11-20T19:26:23,932 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 8817965b906b4ce0becfd6a96bbaed1a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:23,932 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:23,932 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=12, startTime=1732130783749; duration=0sec 2024-11-20T19:26:23,932 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:23,932 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:23,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742162_1338 (size=12301) 2024-11-20T19:26:23,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:23,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:23,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130843978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130843978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130843979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130843981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:23,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:23,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130843983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130844084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130844084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130844084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130844086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130844087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:24,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130844287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130844288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130844288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130844289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130844289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,337 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6221c04b13cf484594324639bee6cfdf 2024-11-20T19:26:24,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/171efe92a0ef4eaaa6e77f195931542b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/171efe92a0ef4eaaa6e77f195931542b 2024-11-20T19:26:24,352 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/171efe92a0ef4eaaa6e77f195931542b, entries=150, sequenceid=388, filesize=12.0 K 2024-11-20T19:26:24,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/b5cf9b53c0354df8814ec96049443efa as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b5cf9b53c0354df8814ec96049443efa 2024-11-20T19:26:24,360 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b5cf9b53c0354df8814ec96049443efa, entries=150, sequenceid=388, filesize=12.0 K 2024-11-20T19:26:24,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6221c04b13cf484594324639bee6cfdf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6221c04b13cf484594324639bee6cfdf 2024-11-20T19:26:24,366 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6221c04b13cf484594324639bee6cfdf, entries=150, sequenceid=388, filesize=12.0 K 2024-11-20T19:26:24,367 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 05d8fd611fc9337dfa63e932920aeaaa in 587ms, sequenceid=388, compaction requested=false 2024-11-20T19:26:24,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:24,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:24,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-20T19:26:24,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-20T19:26:24,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-20T19:26:24,370 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3530 sec 2024-11-20T19:26:24,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.3600 sec 2024-11-20T19:26:24,621 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-20T19:26:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:24,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:24,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:24,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:24,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:24,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/8b5fe3e184ed40b5b1bd13ee4e89d16c is 50, key is test_row_0/A:col10/1732130784621/Put/seqid=0 2024-11-20T19:26:24,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742163_1339 (size=14741) 2024-11-20T19:26:24,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130844636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130844638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130844640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130844640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130844641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130844740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130844741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130844743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130844748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130844748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130844945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130844946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130844949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130844950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:24,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:24,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130844950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/8b5fe3e184ed40b5b1bd13ee4e89d16c 2024-11-20T19:26:25,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/2a32a54ce413476dbe2d6e11d920465d is 50, key is test_row_0/B:col10/1732130784621/Put/seqid=0 2024-11-20T19:26:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742164_1340 (size=12301) 2024-11-20T19:26:25,101 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/2a32a54ce413476dbe2d6e11d920465d 2024-11-20T19:26:25,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-20T19:26:25,119 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-20T19:26:25,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:25,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38e9f11f6fec4427841a06c88a6d365e is 50, key is test_row_0/C:col10/1732130784621/Put/seqid=0 2024-11-20T19:26:25,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-20T19:26:25,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:25,123 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:25,124 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:25,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:25,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742165_1341 (size=12301) 2024-11-20T19:26:25,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38e9f11f6fec4427841a06c88a6d365e 2024-11-20T19:26:25,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/8b5fe3e184ed40b5b1bd13ee4e89d16c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8b5fe3e184ed40b5b1bd13ee4e89d16c 2024-11-20T19:26:25,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8b5fe3e184ed40b5b1bd13ee4e89d16c, entries=200, sequenceid=409, filesize=14.4 K 2024-11-20T19:26:25,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/2a32a54ce413476dbe2d6e11d920465d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/2a32a54ce413476dbe2d6e11d920465d 2024-11-20T19:26:25,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/2a32a54ce413476dbe2d6e11d920465d, 
entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T19:26:25,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38e9f11f6fec4427841a06c88a6d365e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38e9f11f6fec4427841a06c88a6d365e 2024-11-20T19:26:25,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38e9f11f6fec4427841a06c88a6d365e, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T19:26:25,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 585ms, sequenceid=409, compaction requested=true 2024-11-20T19:26:25,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:25,205 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:25,206 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:25,206 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:25,206 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:25,206 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a725c3aa6f974dc6893652ae4c818599, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/171efe92a0ef4eaaa6e77f195931542b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8b5fe3e184ed40b5b1bd13ee4e89d16c] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=39.3 K 2024-11-20T19:26:25,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:25,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,207 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:25,207 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a725c3aa6f974dc6893652ae4c818599, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732130782713 2024-11-20T19:26:25,207 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 171efe92a0ef4eaaa6e77f195931542b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1732130782845 2024-11-20T19:26:25,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:25,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:25,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:25,208 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:25,208 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:25,208 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:25,208 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8305c5ec8b804bdfb2f1a78c5f881ac1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b5cf9b53c0354df8814ec96049443efa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/2a32a54ce413476dbe2d6e11d920465d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=36.9 K 2024-11-20T19:26:25,208 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b5fe3e184ed40b5b1bd13ee4e89d16c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130783981 2024-11-20T19:26:25,209 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8305c5ec8b804bdfb2f1a78c5f881ac1, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732130782713 2024-11-20T19:26:25,209 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b5cf9b53c0354df8814ec96049443efa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1732130782845 2024-11-20T19:26:25,210 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a32a54ce413476dbe2d6e11d920465d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130783981 2024-11-20T19:26:25,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:25,233 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#288 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:25,234 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/5aa12eb314c84762838165c715a0d910 is 50, key is test_row_0/B:col10/1732130784621/Put/seqid=0 2024-11-20T19:26:25,251 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#289 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:25,252 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/5664dc8624d54452aa871e29a46f54a3 is 50, key is test_row_0/A:col10/1732130784621/Put/seqid=0 2024-11-20T19:26:25,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:25,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:26:25,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:25,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:25,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:25,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:25,275 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:25,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:25,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d0a67573018d4c4285798ba67eccb5eb is 50, key is test_row_0/A:col10/1732130785256/Put/seqid=0 2024-11-20T19:26:25,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130845282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130845283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130845285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130845285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130845285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742166_1342 (size=13289) 2024-11-20T19:26:25,304 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/5aa12eb314c84762838165c715a0d910 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/5aa12eb314c84762838165c715a0d910 2024-11-20T19:26:25,311 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 5aa12eb314c84762838165c715a0d910(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
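[Editorial note] The flush (pid=79/80) and minor-compaction entries above are the server-side record of routine table maintenance. As a point of reference only, a minimal client-side sketch, assuming an HBase 2.x client with hbase-site.xml on the classpath (the class name is invented for illustration, and this is not code from the test itself), of issuing the corresponding flush and compaction requests through the Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Ask the cluster to flush the table's memstores; on recent versions this request
            // is carried out by a master-side flush procedure like the one logged above.
            admin.flush(table);
            // Request a major compaction; the region server still schedules it itself,
            // much as the minor compactions above were queued by CompactSplit.
            admin.majorCompact(table);
        }
    }
}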
2024-11-20T19:26:25,311 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:25,311 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=13, startTime=1732130785207; duration=0sec 2024-11-20T19:26:25,311 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:25,311 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:25,311 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:25,312 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:25,312 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:25,312 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,312 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8817965b906b4ce0becfd6a96bbaed1a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6221c04b13cf484594324639bee6cfdf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38e9f11f6fec4427841a06c88a6d365e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=36.9 K 2024-11-20T19:26:25,313 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8817965b906b4ce0becfd6a96bbaed1a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732130782713 2024-11-20T19:26:25,313 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6221c04b13cf484594324639bee6cfdf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1732130782845 2024-11-20T19:26:25,313 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e9f11f6fec4427841a06c88a6d365e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130783981 2024-11-20T19:26:25,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 
is added to blk_1073742167_1343 (size=13289) 2024-11-20T19:26:25,335 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/5664dc8624d54452aa871e29a46f54a3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/5664dc8624d54452aa871e29a46f54a3 2024-11-20T19:26:25,342 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into 5664dc8624d54452aa871e29a46f54a3(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:25,342 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:25,342 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=13, startTime=1732130785205; duration=0sec 2024-11-20T19:26:25,342 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,342 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:25,349 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#291 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:25,349 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/958d92cccd5e4ecbb23a5ff86b8c1c93 is 50, key is test_row_0/C:col10/1732130784621/Put/seqid=0 2024-11-20T19:26:25,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742168_1344 (size=17181) 2024-11-20T19:26:25,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130845392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742169_1345 (size=13289) 2024-11-20T19:26:25,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130845393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130845393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130845394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130845394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:25,428 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:25,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
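[Editorial note] The repeated RegionTooBusyException warnings above originate in HRegion.checkResources (visible in the stack traces): writes are rejected while the region's memstore exceeds its blocking limit, which is the configured flush size multiplied by the block multiplier. A minimal, hypothetical configuration sketch follows; the concrete values are assumptions chosen only to illustrate one way the 512.0 K limit seen here could arise (e.g. a 128 KB flush size with the default multiplier of 4), not the test's actual settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Memstore size at which a region flush is triggered (assumed test-scale value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writes are blocked with RegionTooBusyException once the memstore grows past
        // flush.size * block.multiplier, i.e. 128 KB * 4 = 512 KB under these assumptions.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
    }
}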
2024-11-20T19:26:25,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:25,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:25,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130845599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130845599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130845600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130845601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130845601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:25,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:25,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:25,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d0a67573018d4c4285798ba67eccb5eb 2024-11-20T19:26:25,784 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/0418a0551824431983ce54857569cc5d is 50, key is test_row_0/B:col10/1732130785256/Put/seqid=0 2024-11-20T19:26:25,805 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/958d92cccd5e4ecbb23a5ff86b8c1c93 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/958d92cccd5e4ecbb23a5ff86b8c1c93 2024-11-20T19:26:25,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742170_1346 (size=12301) 2024-11-20T19:26:25,818 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 958d92cccd5e4ecbb23a5ff86b8c1c93(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:25,818 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:25,818 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=13, startTime=1732130785208; duration=0sec 2024-11-20T19:26:25,819 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:25,819 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:25,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/0418a0551824431983ce54857569cc5d 2024-11-20T19:26:25,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/94fc1fc6a90f49a9811f4827a1465c52 is 50, key is test_row_0/C:col10/1732130785256/Put/seqid=0 2024-11-20T19:26:25,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742171_1347 (size=12301) 2024-11-20T19:26:25,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=429 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/94fc1fc6a90f49a9811f4827a1465c52 2024-11-20T19:26:25,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:25,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:25,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:25,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:25,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:25,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d0a67573018d4c4285798ba67eccb5eb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d0a67573018d4c4285798ba67eccb5eb 2024-11-20T19:26:25,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d0a67573018d4c4285798ba67eccb5eb, entries=250, sequenceid=429, filesize=16.8 K 2024-11-20T19:26:25,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130845903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130845904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130845904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130845905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/0418a0551824431983ce54857569cc5d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0418a0551824431983ce54857569cc5d 2024-11-20T19:26:25,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:25,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130845906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:25,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0418a0551824431983ce54857569cc5d, entries=150, sequenceid=429, filesize=12.0 K 2024-11-20T19:26:25,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/94fc1fc6a90f49a9811f4827a1465c52 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/94fc1fc6a90f49a9811f4827a1465c52 2024-11-20T19:26:25,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/94fc1fc6a90f49a9811f4827a1465c52, entries=150, sequenceid=429, filesize=12.0 K 2024-11-20T19:26:25,922 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 666ms, sequenceid=429, compaction requested=false 2024-11-20T19:26:25,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:26,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:26,041 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:26,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:26,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/1c4d7d4be38046cd9cb18413de00db73 is 50, key is test_row_0/A:col10/1732130785284/Put/seqid=0 2024-11-20T19:26:26,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742172_1348 (size=12301) 2024-11-20T19:26:26,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:26,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:26,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:26,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130846428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130846432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130846432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130846433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130846439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,500 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/1c4d7d4be38046cd9cb18413de00db73 2024-11-20T19:26:26,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8c186c834dc14eabbdf30063f5ad8b4f is 50, key is test_row_0/B:col10/1732130785284/Put/seqid=0 2024-11-20T19:26:26,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742173_1349 (size=12301) 2024-11-20T19:26:26,532 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8c186c834dc14eabbdf30063f5ad8b4f 2024-11-20T19:26:26,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130846535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130846536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130846537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,541 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130846540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130846542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/fc346797f678432e9995cf6dcea011ec is 50, key is test_row_0/C:col10/1732130785284/Put/seqid=0 2024-11-20T19:26:26,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742174_1350 (size=12301) 2024-11-20T19:26:26,577 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/fc346797f678432e9995cf6dcea011ec 2024-11-20T19:26:26,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/1c4d7d4be38046cd9cb18413de00db73 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1c4d7d4be38046cd9cb18413de00db73 2024-11-20T19:26:26,589 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1c4d7d4be38046cd9cb18413de00db73, entries=150, sequenceid=450, filesize=12.0 K 2024-11-20T19:26:26,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/8c186c834dc14eabbdf30063f5ad8b4f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8c186c834dc14eabbdf30063f5ad8b4f 2024-11-20T19:26:26,596 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8c186c834dc14eabbdf30063f5ad8b4f, entries=150, sequenceid=450, filesize=12.0 K 2024-11-20T19:26:26,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/fc346797f678432e9995cf6dcea011ec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/fc346797f678432e9995cf6dcea011ec 2024-11-20T19:26:26,613 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/fc346797f678432e9995cf6dcea011ec, entries=150, sequenceid=450, filesize=12.0 K 2024-11-20T19:26:26,615 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 574ms, sequenceid=450, compaction requested=true 2024-11-20T19:26:26,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:26,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:26,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-20T19:26:26,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-20T19:26:26,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-20T19:26:26,618 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4920 sec 2024-11-20T19:26:26,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.4970 sec 2024-11-20T19:26:26,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:26,741 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:26:26,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:26,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:26,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:26,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:26,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:26,741 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:26,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/8320fe79931945be82f4e71c3e2f83fa is 50, key is test_row_0/A:col10/1732130786417/Put/seqid=0 2024-11-20T19:26:26,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130846763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130846763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130846765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130846771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130846771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742175_1351 (size=12301) 2024-11-20T19:26:26,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/8320fe79931945be82f4e71c3e2f83fa 2024-11-20T19:26:26,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/4ceae5e2a2d644c8ad273b4bc8053173 is 50, key is test_row_0/B:col10/1732130786417/Put/seqid=0 2024-11-20T19:26:26,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742176_1352 (size=12301) 2024-11-20T19:26:26,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/4ceae5e2a2d644c8ad273b4bc8053173 2024-11-20T19:26:26,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6d958047b7f047a0978d038c703ea14e is 50, key is test_row_0/C:col10/1732130786417/Put/seqid=0 2024-11-20T19:26:26,876 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130846869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130846869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130846870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130846875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:26,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130846875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:26,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742177_1353 (size=12301) 2024-11-20T19:26:27,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130847077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130847078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130847078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130847083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130847084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-20T19:26:27,226 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-20T19:26:27,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-20T19:26:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:27,230 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:27,231 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:27,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:27,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6d958047b7f047a0978d038c703ea14e 2024-11-20T19:26:27,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/8320fe79931945be82f4e71c3e2f83fa as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8320fe79931945be82f4e71c3e2f83fa 2024-11-20T19:26:27,316 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8320fe79931945be82f4e71c3e2f83fa, entries=150, sequenceid=469, filesize=12.0 K 2024-11-20T19:26:27,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/4ceae5e2a2d644c8ad273b4bc8053173 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/4ceae5e2a2d644c8ad273b4bc8053173 2024-11-20T19:26:27,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/4ceae5e2a2d644c8ad273b4bc8053173, entries=150, sequenceid=469, filesize=12.0 K 2024-11-20T19:26:27,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6d958047b7f047a0978d038c703ea14e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6d958047b7f047a0978d038c703ea14e 2024-11-20T19:26:27,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:27,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6d958047b7f047a0978d038c703ea14e, entries=150, sequenceid=469, filesize=12.0 K 2024-11-20T19:26:27,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 592ms, sequenceid=469, compaction requested=true 2024-11-20T19:26:27,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:27,334 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:27,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:27,336 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55072 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:27,336 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:27,336 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in 
TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,336 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/5664dc8624d54452aa871e29a46f54a3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d0a67573018d4c4285798ba67eccb5eb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1c4d7d4be38046cd9cb18413de00db73, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8320fe79931945be82f4e71c3e2f83fa] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=53.8 K 2024-11-20T19:26:27,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,336 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:27,337 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5664dc8624d54452aa871e29a46f54a3, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130783981 2024-11-20T19:26:27,337 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0a67573018d4c4285798ba67eccb5eb, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732130784636 2024-11-20T19:26:27,338 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c4d7d4be38046cd9cb18413de00db73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732130785283 2024-11-20T19:26:27,338 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:27,338 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:27,338 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:27,338 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/5aa12eb314c84762838165c715a0d910, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0418a0551824431983ce54857569cc5d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8c186c834dc14eabbdf30063f5ad8b4f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/4ceae5e2a2d644c8ad273b4bc8053173] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=49.0 K 2024-11-20T19:26:27,339 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8320fe79931945be82f4e71c3e2f83fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732130786417 2024-11-20T19:26:27,339 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aa12eb314c84762838165c715a0d910, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130783981 2024-11-20T19:26:27,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:27,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:27,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:27,339 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0418a0551824431983ce54857569cc5d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732130784636 2024-11-20T19:26:27,340 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c186c834dc14eabbdf30063f5ad8b4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732130785283 2024-11-20T19:26:27,340 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ceae5e2a2d644c8ad273b4bc8053173, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732130786417 2024-11-20T19:26:27,355 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#300 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:27,356 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/7fc07c0f339d4fe4a312c203d7f4eb0c is 50, key is test_row_0/A:col10/1732130786417/Put/seqid=0 2024-11-20T19:26:27,381 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#301 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:27,382 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/96cf00cc3ce94f8a8adf1fbbf4befed2 is 50, key is test_row_0/B:col10/1732130786417/Put/seqid=0 2024-11-20T19:26:27,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:26:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:27,385 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:27,387 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T19:26:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:27,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:27,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742178_1354 (size=13425) 2024-11-20T19:26:27,402 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/7fc07c0f339d4fe4a312c203d7f4eb0c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7fc07c0f339d4fe4a312c203d7f4eb0c 2024-11-20T19:26:27,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/42306629606543819d554d8fda3638bb is 50, key is test_row_0/A:col10/1732130786755/Put/seqid=0 2024-11-20T19:26:27,409 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130847402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130847403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130847403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,414 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into 7fc07c0f339d4fe4a312c203d7f4eb0c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:27,414 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:27,414 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=12, startTime=1732130787334; duration=0sec 2024-11-20T19:26:27,414 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:27,414 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:27,414 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:27,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130847409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130847409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:27,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:27,419 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,419 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/958d92cccd5e4ecbb23a5ff86b8c1c93, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/94fc1fc6a90f49a9811f4827a1465c52, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/fc346797f678432e9995cf6dcea011ec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6d958047b7f047a0978d038c703ea14e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=49.0 K 2024-11-20T19:26:27,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 958d92cccd5e4ecbb23a5ff86b8c1c93, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1732130783981 2024-11-20T19:26:27,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94fc1fc6a90f49a9811f4827a1465c52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=429, earliestPutTs=1732130784636 2024-11-20T19:26:27,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc346797f678432e9995cf6dcea011ec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732130785283 2024-11-20T19:26:27,421 DEBUG 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d958047b7f047a0978d038c703ea14e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732130786417 2024-11-20T19:26:27,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742179_1355 (size=13425) 2024-11-20T19:26:27,453 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:27,454 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/b0cf7ec338244d46a323046cda139370 is 50, key is test_row_0/C:col10/1732130786417/Put/seqid=0 2024-11-20T19:26:27,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742180_1356 (size=12301) 2024-11-20T19:26:27,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/42306629606543819d554d8fda3638bb 2024-11-20T19:26:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742181_1357 (size=13425) 2024-11-20T19:26:27,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130847511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130847511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,514 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/b0cf7ec338244d46a323046cda139370 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b0cf7ec338244d46a323046cda139370 2024-11-20T19:26:27,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130847512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/fba3656cdd8245969fe933a5f63a7bf3 is 50, key is test_row_0/B:col10/1732130786755/Put/seqid=0 2024-11-20T19:26:27,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130847519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130847519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,523 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into b0cf7ec338244d46a323046cda139370(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:27,523 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:27,523 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=12, startTime=1732130787339; duration=0sec 2024-11-20T19:26:27,523 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,523 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:27,539 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T19:26:27,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:27,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:27,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:27,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742182_1358 (size=12301) 2024-11-20T19:26:27,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/fba3656cdd8245969fe933a5f63a7bf3 2024-11-20T19:26:27,595 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/3883f3ba9a5841c9b5f67c719c7dd4da is 50, key is test_row_0/C:col10/1732130786755/Put/seqid=0 2024-11-20T19:26:27,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742183_1359 (size=12301) 2024-11-20T19:26:27,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/3883f3ba9a5841c9b5f67c719c7dd4da 2024-11-20T19:26:27,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/42306629606543819d554d8fda3638bb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/42306629606543819d554d8fda3638bb 2024-11-20T19:26:27,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/42306629606543819d554d8fda3638bb, entries=150, sequenceid=488, filesize=12.0 K 2024-11-20T19:26:27,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/fba3656cdd8245969fe933a5f63a7bf3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/fba3656cdd8245969fe933a5f63a7bf3 2024-11-20T19:26:27,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/fba3656cdd8245969fe933a5f63a7bf3, entries=150, sequenceid=488, filesize=12.0 K 2024-11-20T19:26:27,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/3883f3ba9a5841c9b5f67c719c7dd4da as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/3883f3ba9a5841c9b5f67c719c7dd4da 2024-11-20T19:26:27,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/3883f3ba9a5841c9b5f67c719c7dd4da, entries=150, sequenceid=488, filesize=12.0 K 2024-11-20T19:26:27,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 274ms, sequenceid=488, compaction requested=false 2024-11-20T19:26:27,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:27,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,693 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:27,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:27,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/afe3d4010c014393a1d069f2b579d58c is 50, key is test_row_0/A:col10/1732130787395/Put/seqid=0 2024-11-20T19:26:27,717 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742184_1360 (size=12301) 2024-11-20T19:26:27,718 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/afe3d4010c014393a1d069f2b579d58c 2024-11-20T19:26:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:27,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:27,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130847737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130847739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130847741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130847742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130847743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9cc966041ef948969a8ff15275e0086f is 50, key is test_row_0/B:col10/1732130787395/Put/seqid=0 2024-11-20T19:26:27,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742185_1361 (size=12301) 2024-11-20T19:26:27,784 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9cc966041ef948969a8ff15275e0086f 2024-11-20T19:26:27,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/01a3904dc55a447a898f14e14f9887b3 is 50, key is test_row_0/C:col10/1732130787395/Put/seqid=0 2024-11-20T19:26:27,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742186_1362 (size=12301) 2024-11-20T19:26:27,828 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/01a3904dc55a447a898f14e14f9887b3 2024-11-20T19:26:27,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:27,845 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/96cf00cc3ce94f8a8adf1fbbf4befed2 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/96cf00cc3ce94f8a8adf1fbbf4befed2 2024-11-20T19:26:27,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130847844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130847844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/afe3d4010c014393a1d069f2b579d58c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/afe3d4010c014393a1d069f2b579d58c 2024-11-20T19:26:27,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130847848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130847849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,852 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 96cf00cc3ce94f8a8adf1fbbf4befed2(size=13.1 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:27,852 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:27,852 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=12, startTime=1732130787336; duration=0sec 2024-11-20T19:26:27,852 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:27,852 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:27,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:27,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130847849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:27,853 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/afe3d4010c014393a1d069f2b579d58c, entries=150, sequenceid=508, filesize=12.0 K 2024-11-20T19:26:27,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/9cc966041ef948969a8ff15275e0086f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9cc966041ef948969a8ff15275e0086f 2024-11-20T19:26:27,869 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9cc966041ef948969a8ff15275e0086f, entries=150, sequenceid=508, filesize=12.0 K 2024-11-20T19:26:27,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/01a3904dc55a447a898f14e14f9887b3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/01a3904dc55a447a898f14e14f9887b3 2024-11-20T19:26:27,876 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/01a3904dc55a447a898f14e14f9887b3, entries=150, sequenceid=508, filesize=12.0 K 2024-11-20T19:26:27,877 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 184ms, sequenceid=508, 
compaction requested=true 2024-11-20T19:26:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-20T19:26:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-20T19:26:27,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-20T19:26:27,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 647 msec 2024-11-20T19:26:27,884 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 654 msec 2024-11-20T19:26:28,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:28,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/170c9ac8e473411c82fcb6562999e99d is 50, key is test_row_0/A:col10/1732130787741/Put/seqid=0 2024-11-20T19:26:28,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130848075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130848075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742187_1363 (size=14741) 2024-11-20T19:26:28,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/170c9ac8e473411c82fcb6562999e99d 2024-11-20T19:26:28,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a5eac779b3c14e8fb72d779d941d9537 is 50, key is test_row_0/B:col10/1732130787741/Put/seqid=0 2024-11-20T19:26:28,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742188_1364 (size=12301) 2024-11-20T19:26:28,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a5eac779b3c14e8fb72d779d941d9537 2024-11-20T19:26:28,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/770c9c33c579478083328aeade57faea is 50, key is test_row_0/C:col10/1732130787741/Put/seqid=0 2024-11-20T19:26:28,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130848216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130848219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742189_1365 (size=12301) 2024-11-20T19:26:28,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=528 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/770c9c33c579478083328aeade57faea 2024-11-20T19:26:28,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/170c9ac8e473411c82fcb6562999e99d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/170c9ac8e473411c82fcb6562999e99d 2024-11-20T19:26:28,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/170c9ac8e473411c82fcb6562999e99d, entries=200, sequenceid=528, filesize=14.4 K 2024-11-20T19:26:28,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a5eac779b3c14e8fb72d779d941d9537 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a5eac779b3c14e8fb72d779d941d9537 2024-11-20T19:26:28,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a5eac779b3c14e8fb72d779d941d9537, entries=150, sequenceid=528, filesize=12.0 K 2024-11-20T19:26:28,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/770c9c33c579478083328aeade57faea as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/770c9c33c579478083328aeade57faea 2024-11-20T19:26:28,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/770c9c33c579478083328aeade57faea, entries=150, sequenceid=528, filesize=12.0 K 2024-11-20T19:26:28,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 05d8fd611fc9337dfa63e932920aeaaa in 235ms, sequenceid=528, compaction requested=true 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:28,287 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:28,287 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:28,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:28,290 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:28,290 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:28,290 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:28,290 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/96cf00cc3ce94f8a8adf1fbbf4befed2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/fba3656cdd8245969fe933a5f63a7bf3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9cc966041ef948969a8ff15275e0086f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a5eac779b3c14e8fb72d779d941d9537] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=49.1 K 2024-11-20T19:26:28,290 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52768 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:28,290 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:28,290 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:28,291 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7fc07c0f339d4fe4a312c203d7f4eb0c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/42306629606543819d554d8fda3638bb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/afe3d4010c014393a1d069f2b579d58c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/170c9ac8e473411c82fcb6562999e99d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=51.5 K 2024-11-20T19:26:28,291 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 96cf00cc3ce94f8a8adf1fbbf4befed2, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732130786417 2024-11-20T19:26:28,291 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fc07c0f339d4fe4a312c203d7f4eb0c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732130786417 2024-11-20T19:26:28,291 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting fba3656cdd8245969fe933a5f63a7bf3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, 
earliestPutTs=1732130786755 2024-11-20T19:26:28,292 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42306629606543819d554d8fda3638bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130786755 2024-11-20T19:26:28,292 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cc966041ef948969a8ff15275e0086f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732130787395 2024-11-20T19:26:28,292 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting afe3d4010c014393a1d069f2b579d58c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732130787395 2024-11-20T19:26:28,292 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a5eac779b3c14e8fb72d779d941d9537, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732130787740 2024-11-20T19:26:28,292 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 170c9ac8e473411c82fcb6562999e99d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732130787734 2024-11-20T19:26:28,313 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#312 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:28,314 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e471f8706e874cc9a1a129ad10cc6791 is 50, key is test_row_0/B:col10/1732130787741/Put/seqid=0 2024-11-20T19:26:28,321 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#313 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:28,322 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/ef515ed2d756407cb3b3c1c927d72f4b is 50, key is test_row_0/A:col10/1732130787741/Put/seqid=0 2024-11-20T19:26:28,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-20T19:26:28,333 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-20T19:26:28,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-20T19:26:28,336 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:28,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:28,336 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:28,337 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:28,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742191_1367 (size=13561) 2024-11-20T19:26:28,372 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/ef515ed2d756407cb3b3c1c927d72f4b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ef515ed2d756407cb3b3c1c927d72f4b 2024-11-20T19:26:28,377 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into ef515ed2d756407cb3b3c1c927d72f4b(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:28,377 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:28,377 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=12, startTime=1732130788287; duration=0sec 2024-11-20T19:26:28,377 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:28,377 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:28,377 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:28,380 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:28,380 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:28,380 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:28,380 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b0cf7ec338244d46a323046cda139370, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/3883f3ba9a5841c9b5f67c719c7dd4da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/01a3904dc55a447a898f14e14f9887b3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/770c9c33c579478083328aeade57faea] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=49.1 K 2024-11-20T19:26:28,381 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0cf7ec338244d46a323046cda139370, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1732130786417 2024-11-20T19:26:28,381 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3883f3ba9a5841c9b5f67c719c7dd4da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732130786755 2024-11-20T19:26:28,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742190_1366 (size=13561) 2024-11-20T19:26:28,386 
DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01a3904dc55a447a898f14e14f9887b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=508, earliestPutTs=1732130787395 2024-11-20T19:26:28,387 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 770c9c33c579478083328aeade57faea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732130787740 2024-11-20T19:26:28,387 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T19:26:28,387 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:28,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:28,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:28,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:28,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,394 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/e471f8706e874cc9a1a129ad10cc6791 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e471f8706e874cc9a1a129ad10cc6791 2024-11-20T19:26:28,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/690f99b3321842f3904bac199d7ae4b4 is 50, key is test_row_0/A:col10/1732130788073/Put/seqid=0 2024-11-20T19:26:28,407 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into e471f8706e874cc9a1a129ad10cc6791(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:28,407 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:28,407 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=12, startTime=1732130788287; duration=0sec 2024-11-20T19:26:28,407 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:28,407 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:28,410 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#315 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:28,411 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6802b246f2c14bb8bc0310ebc8e4cfef is 50, key is test_row_0/C:col10/1732130787741/Put/seqid=0 2024-11-20T19:26:28,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130848423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130848425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742192_1368 (size=14741) 2024-11-20T19:26:28,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=547 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/690f99b3321842f3904bac199d7ae4b4 2024-11-20T19:26:28,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:28,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742193_1369 (size=13561) 2024-11-20T19:26:28,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d8261a00682b4c0dbfc29b85f374bcf5 is 50, key is test_row_0/B:col10/1732130788073/Put/seqid=0 2024-11-20T19:26:28,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,489 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:28,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:28,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
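The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (512 K in this run); callers are expected to back off and retry. The failed pid=84 report is benign: FlushRegionCallable found the region already flushing, returned an error, and the master re-dispatches the procedure (it succeeds at 19:26:28,643 further down). A minimal client-side sketch of handling the exception, assuming it surfaces unwrapped (depending on client retry settings it may instead arrive inside a retries-exhausted exception) and reusing the table and row names from this log, might look like:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          // The HBase client normally retries this internally as well.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Memstore above the blocking limit; give the flush time to catch up.
          if (attempt >= 10) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}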
2024-11-20T19:26:28,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742194_1370 (size=12301) 2024-11-20T19:26:28,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=547 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d8261a00682b4c0dbfc29b85f374bcf5 2024-11-20T19:26:28,516 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/c4fe629abad64ea485cf9769015ae1da is 50, key is test_row_0/C:col10/1732130788073/Put/seqid=0 2024-11-20T19:26:28,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742195_1371 (size=12301) 2024-11-20T19:26:28,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=547 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/c4fe629abad64ea485cf9769015ae1da 2024-11-20T19:26:28,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/690f99b3321842f3904bac199d7ae4b4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/690f99b3321842f3904bac199d7ae4b4 2024-11-20T19:26:28,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/690f99b3321842f3904bac199d7ae4b4, entries=200, sequenceid=547, filesize=14.4 K 2024-11-20T19:26:28,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/d8261a00682b4c0dbfc29b85f374bcf5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d8261a00682b4c0dbfc29b85f374bcf5 2024-11-20T19:26:28,574 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d8261a00682b4c0dbfc29b85f374bcf5, entries=150, sequenceid=547, filesize=12.0 K 2024-11-20T19:26:28,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/c4fe629abad64ea485cf9769015ae1da as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/c4fe629abad64ea485cf9769015ae1da 2024-11-20T19:26:28,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/c4fe629abad64ea485cf9769015ae1da, entries=150, sequenceid=547, filesize=12.0 K 2024-11-20T19:26:28,581 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 194ms, sequenceid=547, compaction requested=false 2024-11-20T19:26:28,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:28,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:28,643 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:28,644 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/eb87ded96a2141c8b52f1bce27e86bb6 is 50, key is test_row_0/A:col10/1732130788416/Put/seqid=0 2024-11-20T19:26:28,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742196_1372 (size=12301) 2024-11-20T19:26:28,703 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/eb87ded96a2141c8b52f1bce27e86bb6 2024-11-20T19:26:28,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/15620250c10741c0aa0cc5ba21a8c159 is 50, key is test_row_0/B:col10/1732130788416/Put/seqid=0 2024-11-20T19:26:28,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:28,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:28,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742197_1373 (size=12301) 2024-11-20T19:26:28,745 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/15620250c10741c0aa0cc5ba21a8c159 2024-11-20T19:26:28,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130848741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130848745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38009ece6a6548c79fa95c573c4ee661 is 50, key is test_row_0/C:col10/1732130788416/Put/seqid=0 2024-11-20T19:26:28,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742198_1374 (size=12301) 2024-11-20T19:26:28,777 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=566 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38009ece6a6548c79fa95c573c4ee661 2024-11-20T19:26:28,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/eb87ded96a2141c8b52f1bce27e86bb6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/eb87ded96a2141c8b52f1bce27e86bb6 2024-11-20T19:26:28,795 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/eb87ded96a2141c8b52f1bce27e86bb6, entries=150, sequenceid=566, filesize=12.0 K 2024-11-20T19:26:28,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/15620250c10741c0aa0cc5ba21a8c159 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/15620250c10741c0aa0cc5ba21a8c159 2024-11-20T19:26:28,804 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/15620250c10741c0aa0cc5ba21a8c159, entries=150, sequenceid=566, filesize=12.0 K 2024-11-20T19:26:28,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/38009ece6a6548c79fa95c573c4ee661 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38009ece6a6548c79fa95c573c4ee661 2024-11-20T19:26:28,811 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38009ece6a6548c79fa95c573c4ee661, entries=150, sequenceid=566, filesize=12.0 K 2024-11-20T19:26:28,812 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 05d8fd611fc9337dfa63e932920aeaaa in 168ms, sequenceid=566, compaction requested=true 2024-11-20T19:26:28,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:28,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
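The flush committed above relieves the memstore pressure that produced the earlier warnings. For context on the 512 K figure: writes are blocked when a region's memstore reaches hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The exact values this test run configures are not visible in the excerpt; a hypothetical mini-cluster setup that would yield the same 512 K ceiling (128 K flush size times a multiplier of 4 is one assumption that fits) is sketched below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class SmallMemstoreCluster {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();
    // Illustrative values only: 128 K * 4 = 512 K blocking limit, matching the
    // "Over memstore limit=512.0 K" warnings in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    util.startMiniCluster(1);
    try {
      // run the write-heavy workload here
    } finally {
      util.shutdownMiniCluster();
    }
  }
}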
2024-11-20T19:26:28,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-20T19:26:28,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-20T19:26:28,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-20T19:26:28,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 476 msec 2024-11-20T19:26:28,816 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 481 msec 2024-11-20T19:26:28,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:28,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:26:28,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:28,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:28,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:28,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:28,870 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/6802b246f2c14bb8bc0310ebc8e4cfef as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6802b246f2c14bb8bc0310ebc8e4cfef 2024-11-20T19:26:28,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130848867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d48e1f066a214011bf6326975fd2cac0 is 50, key is test_row_0/A:col10/1732130788737/Put/seqid=0 2024-11-20T19:26:28,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130848868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,878 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 6802b246f2c14bb8bc0310ebc8e4cfef(size=13.2 K), total size for store is 37.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
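The FlushTableProcedure activity below (pid=83 reported complete, then a new flush queued as pid=85 for the jenkins client) corresponds to an admin-initiated flush of the table. A minimal client-side equivalent, assuming a connection to the same cluster, would be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master and waits for it to finish,
      // as in the "Operation: FLUSH ... procId: 83 completed" entry below.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}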
2024-11-20T19:26:28,878 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:28,878 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=12, startTime=1732130788287; duration=0sec 2024-11-20T19:26:28,878 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:28,879 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:28,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742199_1375 (size=14741) 2024-11-20T19:26:28,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d48e1f066a214011bf6326975fd2cac0 2024-11-20T19:26:28,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a1f1a2add6934b48b7827578aeb689d3 is 50, key is test_row_0/B:col10/1732130788737/Put/seqid=0 2024-11-20T19:26:28,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-20T19:26:28,939 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-20T19:26:28,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:28,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-20T19:26:28,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:28,947 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:28,949 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:28,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:28,950 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742200_1376 (size=12301) 2024-11-20T19:26:28,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a1f1a2add6934b48b7827578aeb689d3 2024-11-20T19:26:28,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/91d106d147164479a204b1fb493c7745 is 50, key is test_row_0/C:col10/1732130788737/Put/seqid=0 2024-11-20T19:26:28,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130848971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130848972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130848978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130848979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130848980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:28,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742201_1377 (size=12301) 2024-11-20T19:26:29,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:29,103 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:29,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:29,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51532 deadline: 1732130849176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51562 deadline: 1732130849177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51610 deadline: 1732130849183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51516 deadline: 1732130849184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:29,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:51514 deadline: 1732130849184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:29,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:29,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:29,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:29,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,393 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/91d106d147164479a204b1fb493c7745 2024-11-20T19:26:29,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/d48e1f066a214011bf6326975fd2cac0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d48e1f066a214011bf6326975fd2cac0 2024-11-20T19:26:29,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d48e1f066a214011bf6326975fd2cac0, entries=200, sequenceid=586, filesize=14.4 K 2024-11-20T19:26:29,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/a1f1a2add6934b48b7827578aeb689d3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a1f1a2add6934b48b7827578aeb689d3 2024-11-20T19:26:29,408 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a1f1a2add6934b48b7827578aeb689d3, entries=150, sequenceid=586, filesize=12.0 K 2024-11-20T19:26:29,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:29,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:29,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:29,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/91d106d147164479a204b1fb493c7745 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/91d106d147164479a204b1fb493c7745 2024-11-20T19:26:29,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/91d106d147164479a204b1fb493c7745, entries=150, sequenceid=586, filesize=12.0 K 2024-11-20T19:26:29,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 05d8fd611fc9337dfa63e932920aeaaa in 569ms, sequenceid=586, compaction requested=true 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 05d8fd611fc9337dfa63e932920aeaaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:29,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-20T19:26:29,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:29,419 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:29,422 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50464 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:29,422 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/C is initiating minor compaction (all files) 2024-11-20T19:26:29,422 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/C in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,422 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6802b246f2c14bb8bc0310ebc8e4cfef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/c4fe629abad64ea485cf9769015ae1da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38009ece6a6548c79fa95c573c4ee661, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/91d106d147164479a204b1fb493c7745] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=49.3 K 2024-11-20T19:26:29,422 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:29,423 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/A is initiating minor compaction (all files) 2024-11-20T19:26:29,423 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/A in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:29,423 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ef515ed2d756407cb3b3c1c927d72f4b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/690f99b3321842f3904bac199d7ae4b4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/eb87ded96a2141c8b52f1bce27e86bb6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d48e1f066a214011bf6326975fd2cac0] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=54.0 K 2024-11-20T19:26:29,423 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef515ed2d756407cb3b3c1c927d72f4b, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732130787740 2024-11-20T19:26:29,423 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6802b246f2c14bb8bc0310ebc8e4cfef, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732130787740 2024-11-20T19:26:29,424 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 690f99b3321842f3904bac199d7ae4b4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=547, earliestPutTs=1732130788060 2024-11-20T19:26:29,424 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb87ded96a2141c8b52f1bce27e86bb6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=566, earliestPutTs=1732130788410 2024-11-20T19:26:29,424 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c4fe629abad64ea485cf9769015ae1da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=547, earliestPutTs=1732130788060 2024-11-20T19:26:29,425 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d48e1f066a214011bf6326975fd2cac0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732130788737 2024-11-20T19:26:29,425 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 38009ece6a6548c79fa95c573c4ee661, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=566, earliestPutTs=1732130788410 2024-11-20T19:26:29,426 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 91d106d147164479a204b1fb493c7745, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732130788737 2024-11-20T19:26:29,453 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#C#compaction#324 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:29,454 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/813042e4866347098cbb3fa58f86c648 is 50, key is test_row_0/C:col10/1732130788737/Put/seqid=0 2024-11-20T19:26:29,456 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#A#compaction#325 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:29,457 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/0f739000387a40389861ed1fa78ff217 is 50, key is test_row_0/A:col10/1732130788737/Put/seqid=0 2024-11-20T19:26:29,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742202_1378 (size=13697) 2024-11-20T19:26:29,486 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-20T19:26:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:29,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:29,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:29,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/2d728f44ca1f48a0a8ae2300193f121b is 50, key is test_row_0/A:col10/1732130788868/Put/seqid=0 2024-11-20T19:26:29,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742203_1379 (size=13697) 2024-11-20T19:26:29,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742204_1380 (size=17181) 2024-11-20T19:26:29,504 DEBUG [Thread-1177 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14ed1e44 to 127.0.0.1:50476 2024-11-20T19:26:29,504 DEBUG [Thread-1177 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,505 DEBUG [Thread-1181 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x036642cb to 127.0.0.1:50476 2024-11-20T19:26:29,505 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,506 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/813042e4866347098cbb3fa58f86c648 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/813042e4866347098cbb3fa58f86c648 2024-11-20T19:26:29,507 DEBUG [Thread-1179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72537a47 to 127.0.0.1:50476 2024-11-20T19:26:29,507 DEBUG [Thread-1179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=606 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/2d728f44ca1f48a0a8ae2300193f121b 2024-11-20T19:26:29,507 DEBUG [Thread-1185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:50476 2024-11-20T19:26:29,507 DEBUG [Thread-1185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,508 DEBUG [Thread-1170 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:50476 2024-11-20T19:26:29,508 DEBUG [Thread-1170 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,508 DEBUG [Thread-1172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:50476 2024-11-20T19:26:29,508 DEBUG [Thread-1172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,509 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:50476 2024-11-20T19:26:29,509 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,510 DEBUG [Thread-1166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1ac389 to 127.0.0.1:50476 2024-11-20T19:26:29,510 DEBUG [Thread-1166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,510 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/0f739000387a40389861ed1fa78ff217 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0f739000387a40389861ed1fa78ff217 2024-11-20T19:26:29,511 DEBUG [Thread-1168 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x028e73c0 to 127.0.0.1:50476 2024-11-20T19:26:29,511 DEBUG [Thread-1168 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,511 DEBUG [Thread-1183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:50476 2024-11-20T19:26:29,512 DEBUG [Thread-1183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:29,522 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
05d8fd611fc9337dfa63e932920aeaaa/C of 05d8fd611fc9337dfa63e932920aeaaa into 813042e4866347098cbb3fa58f86c648(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:29,522 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/A of 05d8fd611fc9337dfa63e932920aeaaa into 0f739000387a40389861ed1fa78ff217(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:29,522 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/A, priority=12, startTime=1732130789419; duration=0sec 2024-11-20T19:26:29,522 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/C, priority=12, startTime=1732130789419; duration=0sec 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:C 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:A 2024-11-20T19:26:29,522 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:29,524 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50464 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:29,524 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 05d8fd611fc9337dfa63e932920aeaaa/B is initiating minor compaction (all files) 2024-11-20T19:26:29,524 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 05d8fd611fc9337dfa63e932920aeaaa/B in TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:29,524 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e471f8706e874cc9a1a129ad10cc6791, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d8261a00682b4c0dbfc29b85f374bcf5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/15620250c10741c0aa0cc5ba21a8c159, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a1f1a2add6934b48b7827578aeb689d3] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp, totalSize=49.3 K 2024-11-20T19:26:29,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/cbf7b4355db4445fa7ef7d987744d4a2 is 50, key is test_row_0/B:col10/1732130788868/Put/seqid=0 2024-11-20T19:26:29,524 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e471f8706e874cc9a1a129ad10cc6791, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=528, earliestPutTs=1732130787740 2024-11-20T19:26:29,525 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d8261a00682b4c0dbfc29b85f374bcf5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=547, earliestPutTs=1732130788060 2024-11-20T19:26:29,525 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 15620250c10741c0aa0cc5ba21a8c159, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=566, earliestPutTs=1732130788410 2024-11-20T19:26:29,525 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a1f1a2add6934b48b7827578aeb689d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=586, earliestPutTs=1732130788737 2024-11-20T19:26:29,548 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 05d8fd611fc9337dfa63e932920aeaaa#B#compaction#328 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:29,550 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/6a874434fe924191baddae2c6828b6d7 is 50, key is test_row_0/B:col10/1732130788737/Put/seqid=0 2024-11-20T19:26:29,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742205_1381 (size=12301) 2024-11-20T19:26:29,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=606 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/cbf7b4355db4445fa7ef7d987744d4a2 2024-11-20T19:26:29,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:29,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:29,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:29,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/511e00f007da4de4ac3cf170147991df is 50, key is test_row_0/C:col10/1732130788868/Put/seqid=0 2024-11-20T19:26:29,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742206_1382 (size=13697) 2024-11-20T19:26:29,599 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/6a874434fe924191baddae2c6828b6d7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6a874434fe924191baddae2c6828b6d7 2024-11-20T19:26:29,605 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 05d8fd611fc9337dfa63e932920aeaaa/B of 05d8fd611fc9337dfa63e932920aeaaa into 6a874434fe924191baddae2c6828b6d7(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:29,605 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:29,605 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa., storeName=05d8fd611fc9337dfa63e932920aeaaa/B, priority=12, startTime=1732130789419; duration=0sec 2024-11-20T19:26:29,605 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:29,605 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 05d8fd611fc9337dfa63e932920aeaaa:B 2024-11-20T19:26:29,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742207_1383 (size=12301) 2024-11-20T19:26:29,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
as already flushing 2024-11-20T19:26:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,867 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:29,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:29,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:29,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:29,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:30,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:30,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:30,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:30,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. as already flushing 2024-11-20T19:26:30,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:30,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:30,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:30,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=606 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/511e00f007da4de4ac3cf170147991df 2024-11-20T19:26:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:30,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/2d728f44ca1f48a0a8ae2300193f121b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2d728f44ca1f48a0a8ae2300193f121b 2024-11-20T19:26:30,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2d728f44ca1f48a0a8ae2300193f121b, entries=250, sequenceid=606, filesize=16.8 K 2024-11-20T19:26:30,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/cbf7b4355db4445fa7ef7d987744d4a2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/cbf7b4355db4445fa7ef7d987744d4a2 2024-11-20T19:26:30,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/cbf7b4355db4445fa7ef7d987744d4a2, entries=150, sequenceid=606, filesize=12.0 K 2024-11-20T19:26:30,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/511e00f007da4de4ac3cf170147991df as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/511e00f007da4de4ac3cf170147991df 2024-11-20T19:26:30,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/511e00f007da4de4ac3cf170147991df, entries=150, sequenceid=606, filesize=12.0 K 2024-11-20T19:26:30,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=67.09 KB/68700 for 05d8fd611fc9337dfa63e932920aeaaa in 550ms, sequenceid=606, compaction requested=false 2024-11-20T19:26:30,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:30,173 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:30,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-20T19:26:30,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:30,173 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 05d8fd611fc9337dfa63e932920aeaaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:30,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=A 2024-11-20T19:26:30,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:30,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=B 2024-11-20T19:26:30,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:30,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 05d8fd611fc9337dfa63e932920aeaaa, store=C 2024-11-20T19:26:30,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:30,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/1d8daf814a1244b1967a49fce32c9e2e is 50, key is test_row_0/A:col10/1732130789509/Put/seqid=0 2024-11-20T19:26:30,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742208_1384 (size=12301) 2024-11-20T19:26:30,584 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=622 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/1d8daf814a1244b1967a49fce32c9e2e 2024-11-20T19:26:30,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/95ab6fc9b8704a86b72fdba19c5626cb is 50, key is test_row_0/B:col10/1732130789509/Put/seqid=0 2024-11-20T19:26:30,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742209_1385 (size=12301) 2024-11-20T19:26:30,993 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=622 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/95ab6fc9b8704a86b72fdba19c5626cb 2024-11-20T19:26:30,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/2af7867a938841d892cd1864d5d4d8c0 is 50, key is test_row_0/C:col10/1732130789509/Put/seqid=0 2024-11-20T19:26:31,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742210_1386 (size=12301) 2024-11-20T19:26:31,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:31,404 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=622 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/2af7867a938841d892cd1864d5d4d8c0 2024-11-20T19:26:31,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/A/1d8daf814a1244b1967a49fce32c9e2e as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1d8daf814a1244b1967a49fce32c9e2e 2024-11-20T19:26:31,410 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1d8daf814a1244b1967a49fce32c9e2e, entries=150, sequenceid=622, filesize=12.0 K 2024-11-20T19:26:31,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/B/95ab6fc9b8704a86b72fdba19c5626cb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/95ab6fc9b8704a86b72fdba19c5626cb 2024-11-20T19:26:31,419 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/95ab6fc9b8704a86b72fdba19c5626cb, entries=150, sequenceid=622, filesize=12.0 K 2024-11-20T19:26:31,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/.tmp/C/2af7867a938841d892cd1864d5d4d8c0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2af7867a938841d892cd1864d5d4d8c0 2024-11-20T19:26:31,423 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2af7867a938841d892cd1864d5d4d8c0, entries=150, sequenceid=622, filesize=12.0 K 2024-11-20T19:26:31,424 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 05d8fd611fc9337dfa63e932920aeaaa in 1250ms, sequenceid=622, compaction requested=true 2024-11-20T19:26:31,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:31,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:31,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-20T19:26:31,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-20T19:26:31,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-20T19:26:31,427 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4760 sec 2024-11-20T19:26:31,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.4820 sec 2024-11-20T19:26:31,574 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:26:33,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-20T19:26:33,051 INFO [Thread-1176 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 95 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 94 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 98 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4659 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4528 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4535 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4654 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4535 2024-11-20T19:26:33,052 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:26:33,052 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:26:33,052 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64dc42d9 to 127.0.0.1:50476 2024-11-20T19:26:33,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:33,053 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:26:33,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:26:33,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored 
pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T19:26:33,055 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130793055"}]},"ts":"1732130793055"} 2024-11-20T19:26:33,056 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:26:33,100 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:26:33,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:26:33,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, UNASSIGN}] 2024-11-20T19:26:33,102 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, UNASSIGN 2024-11-20T19:26:33,103 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=05d8fd611fc9337dfa63e932920aeaaa, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:33,103 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:26:33,104 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; CloseRegionProcedure 05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:26:33,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T19:26:33,255 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:33,255 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(124): Close 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:33,255 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:26:33,255 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1681): Closing 05d8fd611fc9337dfa63e932920aeaaa, disabling compactions & flushes 2024-11-20T19:26:33,255 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
2024-11-20T19:26:33,255 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:33,255 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. after waiting 0 ms 2024-11-20T19:26:33,255 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 2024-11-20T19:26:33,256 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c8c56cc5f4204ae3bbe998eeee2e5c2d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/931caa3bf796422688da3db7d1e4a6fd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/43f5e6fb8704408d9513b67cd577d03a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/73017182bc454e0e9413c7eb49a148a1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/75fb8bbc8b8e4bb2a63ccc3b731351de, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/dc0e7a722f1945c7acfd91186cdfd87e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2112fa7dac4041a4a771c2d85fc1e3bf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/6989f3b6aa454e208badaf3c9a06b4bc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/e748ea77912e410f8bd97934806d6d18, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c19d89fd2b35492ebb2c01b973653e7f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a07c26eaeaa04655bb862778399582c0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d137baf231644f3194b4f4e229e32540, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/f2d2b20c38534d2180b8356c36bd8cb5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/99eed924d1244a78b869e6b520694db4, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7977c0a5d0564a42abf9e4dfdbce39be, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d15bf91b62c741729fd6c88bb714fd81, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0eb3d831fa764533bec44e8330c1ae89, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c61809783be9453cb91b339ba42bc63b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ea45af8bb33e4942ab7674d4c36492b8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/b18223ef8c1d4d3c9dbe5596f9ab457e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/87d8166141104f26a753936f56ff5ab3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/539b910e9832471eb3bfed67e7e4fb98, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/28d4499a310146c4b7ce7a6a5eea857c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c12c3d01d27f407bbfbdff8a22884a68, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d089ee2616ff4039aedade0e561690e0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/85c0c021c6374bbeb60b8a6a92264ca1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a725c3aa6f974dc6893652ae4c818599, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/171efe92a0ef4eaaa6e77f195931542b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8b5fe3e184ed40b5b1bd13ee4e89d16c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/5664dc8624d54452aa871e29a46f54a3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d0a67573018d4c4285798ba67eccb5eb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1c4d7d4be38046cd9cb18413de00db73, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7fc07c0f339d4fe4a312c203d7f4eb0c, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8320fe79931945be82f4e71c3e2f83fa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/42306629606543819d554d8fda3638bb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/afe3d4010c014393a1d069f2b579d58c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/170c9ac8e473411c82fcb6562999e99d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ef515ed2d756407cb3b3c1c927d72f4b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/690f99b3321842f3904bac199d7ae4b4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/eb87ded96a2141c8b52f1bce27e86bb6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d48e1f066a214011bf6326975fd2cac0] to archive 2024-11-20T19:26:33,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:26:33,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c8c56cc5f4204ae3bbe998eeee2e5c2d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c8c56cc5f4204ae3bbe998eeee2e5c2d 2024-11-20T19:26:33,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/931caa3bf796422688da3db7d1e4a6fd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/931caa3bf796422688da3db7d1e4a6fd 2024-11-20T19:26:33,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/43f5e6fb8704408d9513b67cd577d03a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/43f5e6fb8704408d9513b67cd577d03a 2024-11-20T19:26:33,261 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/73017182bc454e0e9413c7eb49a148a1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/73017182bc454e0e9413c7eb49a148a1 2024-11-20T19:26:33,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/75fb8bbc8b8e4bb2a63ccc3b731351de to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/75fb8bbc8b8e4bb2a63ccc3b731351de 2024-11-20T19:26:33,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/dc0e7a722f1945c7acfd91186cdfd87e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/dc0e7a722f1945c7acfd91186cdfd87e 2024-11-20T19:26:33,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2112fa7dac4041a4a771c2d85fc1e3bf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2112fa7dac4041a4a771c2d85fc1e3bf 2024-11-20T19:26:33,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/6989f3b6aa454e208badaf3c9a06b4bc to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/6989f3b6aa454e208badaf3c9a06b4bc 2024-11-20T19:26:33,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/e748ea77912e410f8bd97934806d6d18 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/e748ea77912e410f8bd97934806d6d18 2024-11-20T19:26:33,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c19d89fd2b35492ebb2c01b973653e7f to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c19d89fd2b35492ebb2c01b973653e7f 2024-11-20T19:26:33,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a07c26eaeaa04655bb862778399582c0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a07c26eaeaa04655bb862778399582c0 2024-11-20T19:26:33,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d137baf231644f3194b4f4e229e32540 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d137baf231644f3194b4f4e229e32540 2024-11-20T19:26:33,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/f2d2b20c38534d2180b8356c36bd8cb5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/f2d2b20c38534d2180b8356c36bd8cb5 2024-11-20T19:26:33,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/99eed924d1244a78b869e6b520694db4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/99eed924d1244a78b869e6b520694db4 2024-11-20T19:26:33,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7977c0a5d0564a42abf9e4dfdbce39be to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7977c0a5d0564a42abf9e4dfdbce39be 2024-11-20T19:26:33,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d15bf91b62c741729fd6c88bb714fd81 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d15bf91b62c741729fd6c88bb714fd81 2024-11-20T19:26:33,272 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0eb3d831fa764533bec44e8330c1ae89 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0eb3d831fa764533bec44e8330c1ae89 2024-11-20T19:26:33,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c61809783be9453cb91b339ba42bc63b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c61809783be9453cb91b339ba42bc63b 2024-11-20T19:26:33,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ea45af8bb33e4942ab7674d4c36492b8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ea45af8bb33e4942ab7674d4c36492b8 2024-11-20T19:26:33,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/b18223ef8c1d4d3c9dbe5596f9ab457e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/b18223ef8c1d4d3c9dbe5596f9ab457e 2024-11-20T19:26:33,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/87d8166141104f26a753936f56ff5ab3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/87d8166141104f26a753936f56ff5ab3 2024-11-20T19:26:33,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/539b910e9832471eb3bfed67e7e4fb98 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/539b910e9832471eb3bfed67e7e4fb98 2024-11-20T19:26:33,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/28d4499a310146c4b7ce7a6a5eea857c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/28d4499a310146c4b7ce7a6a5eea857c 2024-11-20T19:26:33,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c12c3d01d27f407bbfbdff8a22884a68 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/c12c3d01d27f407bbfbdff8a22884a68 2024-11-20T19:26:33,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d089ee2616ff4039aedade0e561690e0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d089ee2616ff4039aedade0e561690e0 2024-11-20T19:26:33,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/85c0c021c6374bbeb60b8a6a92264ca1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/85c0c021c6374bbeb60b8a6a92264ca1 2024-11-20T19:26:33,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a725c3aa6f974dc6893652ae4c818599 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/a725c3aa6f974dc6893652ae4c818599 2024-11-20T19:26:33,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/171efe92a0ef4eaaa6e77f195931542b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/171efe92a0ef4eaaa6e77f195931542b 2024-11-20T19:26:33,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8b5fe3e184ed40b5b1bd13ee4e89d16c to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8b5fe3e184ed40b5b1bd13ee4e89d16c 2024-11-20T19:26:33,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/5664dc8624d54452aa871e29a46f54a3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/5664dc8624d54452aa871e29a46f54a3 2024-11-20T19:26:33,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d0a67573018d4c4285798ba67eccb5eb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d0a67573018d4c4285798ba67eccb5eb 2024-11-20T19:26:33,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1c4d7d4be38046cd9cb18413de00db73 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1c4d7d4be38046cd9cb18413de00db73 2024-11-20T19:26:33,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7fc07c0f339d4fe4a312c203d7f4eb0c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/7fc07c0f339d4fe4a312c203d7f4eb0c 2024-11-20T19:26:33,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8320fe79931945be82f4e71c3e2f83fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/8320fe79931945be82f4e71c3e2f83fa 2024-11-20T19:26:33,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/42306629606543819d554d8fda3638bb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/42306629606543819d554d8fda3638bb 2024-11-20T19:26:33,289 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/afe3d4010c014393a1d069f2b579d58c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/afe3d4010c014393a1d069f2b579d58c 2024-11-20T19:26:33,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/170c9ac8e473411c82fcb6562999e99d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/170c9ac8e473411c82fcb6562999e99d 2024-11-20T19:26:33,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ef515ed2d756407cb3b3c1c927d72f4b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/ef515ed2d756407cb3b3c1c927d72f4b 2024-11-20T19:26:33,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/690f99b3321842f3904bac199d7ae4b4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/690f99b3321842f3904bac199d7ae4b4 2024-11-20T19:26:33,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/eb87ded96a2141c8b52f1bce27e86bb6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/eb87ded96a2141c8b52f1bce27e86bb6 2024-11-20T19:26:33,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d48e1f066a214011bf6326975fd2cac0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/d48e1f066a214011bf6326975fd2cac0 2024-11-20T19:26:33,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9e81353f330a4f4b904d723b94f20194, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d5e1b82353064570bb3b81f54a986392, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f8649629e61e465887d292d0db02fd03, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e5a9d1df656643fe9fd4122bf9b72a8d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7cc65a8b12a544e1ae6e692d6037b4de, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd7371fd85f0483c9f7f98876cbf97a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8bbd64bbd6984190a786cd601e13bce2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/bfdaa9a083a647a2bc57e571475cac54, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d83471f4418c4053b1d7ff3aa7d13cbb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b3ca0d2390364c529c972ee348e60d2d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9c47e0fad07942af82643097320bcc46, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6fe06271bd854a6c9b8b959c8d36223f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f5366a0bed634bbe8e0793e4a97fddeb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e961359b25f54a3492e821cb7eb33bbe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e431074230474a258c928728de0d76ab, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7053c3b16d4d47478b421dfce215085b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8599d282c5964fe2b8c0ef3bcb430c1a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/04a8321584b84536a8e718e3514ad7a9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a98075c22d8644018b804a2bb85ddfb5, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b1acc767926c4202a9f089b0c60a91a7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/3905e77381a14805b2d02fbf29ae976d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0c3d7035a3544413a139ba7fc69a7ad9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/895f8bca252e4879a42c833609c958bb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd36d00f07274268a2935dd48f5781f3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/572bc72e3a0641659d13510ed68d8c51, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8305c5ec8b804bdfb2f1a78c5f881ac1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a11a3c5611d946a7a9d3083005e84cef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b5cf9b53c0354df8814ec96049443efa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/5aa12eb314c84762838165c715a0d910, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/2a32a54ce413476dbe2d6e11d920465d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0418a0551824431983ce54857569cc5d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8c186c834dc14eabbdf30063f5ad8b4f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/96cf00cc3ce94f8a8adf1fbbf4befed2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/4ceae5e2a2d644c8ad273b4bc8053173, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/fba3656cdd8245969fe933a5f63a7bf3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9cc966041ef948969a8ff15275e0086f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e471f8706e874cc9a1a129ad10cc6791, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a5eac779b3c14e8fb72d779d941d9537, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d8261a00682b4c0dbfc29b85f374bcf5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/15620250c10741c0aa0cc5ba21a8c159, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a1f1a2add6934b48b7827578aeb689d3] to archive 2024-11-20T19:26:33,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:26:33,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9e81353f330a4f4b904d723b94f20194 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9e81353f330a4f4b904d723b94f20194 2024-11-20T19:26:33,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d5e1b82353064570bb3b81f54a986392 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d5e1b82353064570bb3b81f54a986392 2024-11-20T19:26:33,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f8649629e61e465887d292d0db02fd03 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f8649629e61e465887d292d0db02fd03 2024-11-20T19:26:33,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e5a9d1df656643fe9fd4122bf9b72a8d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e5a9d1df656643fe9fd4122bf9b72a8d 2024-11-20T19:26:33,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7cc65a8b12a544e1ae6e692d6037b4de to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7cc65a8b12a544e1ae6e692d6037b4de 2024-11-20T19:26:33,301 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd7371fd85f0483c9f7f98876cbf97a4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd7371fd85f0483c9f7f98876cbf97a4 2024-11-20T19:26:33,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8bbd64bbd6984190a786cd601e13bce2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8bbd64bbd6984190a786cd601e13bce2 2024-11-20T19:26:33,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/bfdaa9a083a647a2bc57e571475cac54 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/bfdaa9a083a647a2bc57e571475cac54 2024-11-20T19:26:33,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d83471f4418c4053b1d7ff3aa7d13cbb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d83471f4418c4053b1d7ff3aa7d13cbb 2024-11-20T19:26:33,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b3ca0d2390364c529c972ee348e60d2d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b3ca0d2390364c529c972ee348e60d2d 2024-11-20T19:26:33,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9c47e0fad07942af82643097320bcc46 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9c47e0fad07942af82643097320bcc46 2024-11-20T19:26:33,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6fe06271bd854a6c9b8b959c8d36223f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6fe06271bd854a6c9b8b959c8d36223f 2024-11-20T19:26:33,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f5366a0bed634bbe8e0793e4a97fddeb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/f5366a0bed634bbe8e0793e4a97fddeb 2024-11-20T19:26:33,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e961359b25f54a3492e821cb7eb33bbe to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e961359b25f54a3492e821cb7eb33bbe 2024-11-20T19:26:33,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e431074230474a258c928728de0d76ab to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e431074230474a258c928728de0d76ab 2024-11-20T19:26:33,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7053c3b16d4d47478b421dfce215085b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/7053c3b16d4d47478b421dfce215085b 2024-11-20T19:26:33,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8599d282c5964fe2b8c0ef3bcb430c1a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8599d282c5964fe2b8c0ef3bcb430c1a 2024-11-20T19:26:33,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/04a8321584b84536a8e718e3514ad7a9 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/04a8321584b84536a8e718e3514ad7a9 2024-11-20T19:26:33,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a98075c22d8644018b804a2bb85ddfb5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a98075c22d8644018b804a2bb85ddfb5 2024-11-20T19:26:33,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b1acc767926c4202a9f089b0c60a91a7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b1acc767926c4202a9f089b0c60a91a7 2024-11-20T19:26:33,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/3905e77381a14805b2d02fbf29ae976d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/3905e77381a14805b2d02fbf29ae976d 2024-11-20T19:26:33,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0c3d7035a3544413a139ba7fc69a7ad9 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0c3d7035a3544413a139ba7fc69a7ad9 2024-11-20T19:26:33,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/895f8bca252e4879a42c833609c958bb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/895f8bca252e4879a42c833609c958bb 2024-11-20T19:26:33,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd36d00f07274268a2935dd48f5781f3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/dd36d00f07274268a2935dd48f5781f3 2024-11-20T19:26:33,319 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/572bc72e3a0641659d13510ed68d8c51 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/572bc72e3a0641659d13510ed68d8c51 2024-11-20T19:26:33,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8305c5ec8b804bdfb2f1a78c5f881ac1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8305c5ec8b804bdfb2f1a78c5f881ac1 2024-11-20T19:26:33,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a11a3c5611d946a7a9d3083005e84cef to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a11a3c5611d946a7a9d3083005e84cef 2024-11-20T19:26:33,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b5cf9b53c0354df8814ec96049443efa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/b5cf9b53c0354df8814ec96049443efa 2024-11-20T19:26:33,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/5aa12eb314c84762838165c715a0d910 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/5aa12eb314c84762838165c715a0d910 2024-11-20T19:26:33,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/2a32a54ce413476dbe2d6e11d920465d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/2a32a54ce413476dbe2d6e11d920465d 2024-11-20T19:26:33,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0418a0551824431983ce54857569cc5d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/0418a0551824431983ce54857569cc5d 2024-11-20T19:26:33,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8c186c834dc14eabbdf30063f5ad8b4f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/8c186c834dc14eabbdf30063f5ad8b4f 2024-11-20T19:26:33,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/96cf00cc3ce94f8a8adf1fbbf4befed2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/96cf00cc3ce94f8a8adf1fbbf4befed2 2024-11-20T19:26:33,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/4ceae5e2a2d644c8ad273b4bc8053173 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/4ceae5e2a2d644c8ad273b4bc8053173 2024-11-20T19:26:33,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/fba3656cdd8245969fe933a5f63a7bf3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/fba3656cdd8245969fe933a5f63a7bf3 2024-11-20T19:26:33,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9cc966041ef948969a8ff15275e0086f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/9cc966041ef948969a8ff15275e0086f 2024-11-20T19:26:33,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e471f8706e874cc9a1a129ad10cc6791 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/e471f8706e874cc9a1a129ad10cc6791 2024-11-20T19:26:33,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a5eac779b3c14e8fb72d779d941d9537 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a5eac779b3c14e8fb72d779d941d9537 2024-11-20T19:26:33,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d8261a00682b4c0dbfc29b85f374bcf5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/d8261a00682b4c0dbfc29b85f374bcf5 2024-11-20T19:26:33,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/15620250c10741c0aa0cc5ba21a8c159 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/15620250c10741c0aa0cc5ba21a8c159 2024-11-20T19:26:33,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a1f1a2add6934b48b7827578aeb689d3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/a1f1a2add6934b48b7827578aeb689d3 2024-11-20T19:26:33,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b6a63137f3d94eb78da88d5848b9c589, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/377bb4af2e504b4da8c8e0ac4c5c59b4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/e204a7992fa84bd8a53ac95f9f238dd8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/4a0cbacaba0a448e9a058db726e9330a, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/70b3aeec342a4cfea8b4c2e53a0c3bc7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ad3d676ee9f04fa1a1b8cf8af1441bf2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38406260cacb4519b4243ec3430d97dc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7e8c5abd725146c18fc1b48e6d492388, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d6483ddc42fa46559c59603d7300cb0e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8a2c92b3e91f4fba9f6e8906ebebee3c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d315dc808b8f4eb69077070d9418a21e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1a63b25e568f4256954fc6a73ebb4dc4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/5621df6a528f40baa9b3ffca33ff6cf7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/9f35914114464065a5b285859d3a91d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/f74e137b8f3040c98accfd3551cc85e4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d359adfeb1ef4ea49a600b3bf06c0269, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2fe20ae141974fd08af8e2ea8658bf1a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/de1d30685c014e76ad83a9cdbfe3287f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ff0bb47871324e71b0e33266dfaf947e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1d5734ed25f84e01aa8612a39109ff76, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/29aa7a1ad6984937b50153a349c50f74, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/234f45cbd4694bfca651233b8c7ca553, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7b22a3ddf8134ea9954af8155ac58047, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8817965b906b4ce0becfd6a96bbaed1a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/992c56511cc14b63ac3575a516cc8acf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6221c04b13cf484594324639bee6cfdf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/958d92cccd5e4ecbb23a5ff86b8c1c93, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38e9f11f6fec4427841a06c88a6d365e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/94fc1fc6a90f49a9811f4827a1465c52, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/fc346797f678432e9995cf6dcea011ec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b0cf7ec338244d46a323046cda139370, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6d958047b7f047a0978d038c703ea14e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/3883f3ba9a5841c9b5f67c719c7dd4da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/01a3904dc55a447a898f14e14f9887b3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6802b246f2c14bb8bc0310ebc8e4cfef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/770c9c33c579478083328aeade57faea, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/c4fe629abad64ea485cf9769015ae1da, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38009ece6a6548c79fa95c573c4ee661, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/91d106d147164479a204b1fb493c7745] to archive 2024-11-20T19:26:33,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
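[Editor's note] The StoreCloser entries above and below show compacted store files being moved from the region's data directory into the cluster archive. In every "Archived from ... to ..." line the destination is derived purely from the source path: a file under <rootDir>/data/<namespace>/<table>/<region>/<family>/ reappears under <rootDir>/archive/data/<namespace>/<table>/<region>/<family>/. The following is a minimal illustrative sketch of that path mapping only; it is not the HBase HFileArchiver implementation, the helper name toArchivePath is hypothetical, and the rootDir and sample file are copied from the log entries above.

public class ArchivePathSketch {
    // Maps <rootDir>/data/<ns>/<table>/<region>/<cf>/<file>
    // to   <rootDir>/archive/data/<ns>/<table>/<region>/<cf>/<file>,
    // mirroring the "Archived from ... to ..." pattern in the log (hypothetical helper, not HBase API).
    static String toArchivePath(String rootDir, String storeFilePath) {
        String root = rootDir.endsWith("/") ? rootDir : rootDir + "/";
        if (!storeFilePath.startsWith(root + "data/")) {
            throw new IllegalArgumentException("not a store file under " + root + "data/");
        }
        // Insert "archive/" between the root directory and the data/... suffix.
        return root + "archive/" + storeFilePath.substring(root.length());
    }

    public static void main(String[] args) {
        // Root dir and store file taken from the log lines above (column family C).
        String root = "hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d";
        String src = root + "/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc";
        // Prints the same archive destination form seen in the "Archived from ... to ..." entries.
        System.out.println(toArchivePath(root, src));
    }
}

[End of editor's note; the log continues below.]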
2024-11-20T19:26:33,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326 2024-11-20T19:26:33,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b6a63137f3d94eb78da88d5848b9c589 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b6a63137f3d94eb78da88d5848b9c589 2024-11-20T19:26:33,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/377bb4af2e504b4da8c8e0ac4c5c59b4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/377bb4af2e504b4da8c8e0ac4c5c59b4 2024-11-20T19:26:33,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/e204a7992fa84bd8a53ac95f9f238dd8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/e204a7992fa84bd8a53ac95f9f238dd8 2024-11-20T19:26:33,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/4a0cbacaba0a448e9a058db726e9330a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/4a0cbacaba0a448e9a058db726e9330a 2024-11-20T19:26:33,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/70b3aeec342a4cfea8b4c2e53a0c3bc7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/70b3aeec342a4cfea8b4c2e53a0c3bc7 2024-11-20T19:26:33,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ad3d676ee9f04fa1a1b8cf8af1441bf2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ad3d676ee9f04fa1a1b8cf8af1441bf2 2024-11-20T19:26:33,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38406260cacb4519b4243ec3430d97dc to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38406260cacb4519b4243ec3430d97dc 2024-11-20T19:26:33,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7e8c5abd725146c18fc1b48e6d492388 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7e8c5abd725146c18fc1b48e6d492388 2024-11-20T19:26:33,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d6483ddc42fa46559c59603d7300cb0e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d6483ddc42fa46559c59603d7300cb0e 2024-11-20T19:26:33,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8a2c92b3e91f4fba9f6e8906ebebee3c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8a2c92b3e91f4fba9f6e8906ebebee3c 2024-11-20T19:26:33,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d315dc808b8f4eb69077070d9418a21e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d315dc808b8f4eb69077070d9418a21e 2024-11-20T19:26:33,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1a63b25e568f4256954fc6a73ebb4dc4 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1a63b25e568f4256954fc6a73ebb4dc4 2024-11-20T19:26:33,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/5621df6a528f40baa9b3ffca33ff6cf7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/5621df6a528f40baa9b3ffca33ff6cf7 2024-11-20T19:26:33,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/9f35914114464065a5b285859d3a91d6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/9f35914114464065a5b285859d3a91d6 2024-11-20T19:26:33,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/f74e137b8f3040c98accfd3551cc85e4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/f74e137b8f3040c98accfd3551cc85e4 2024-11-20T19:26:33,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d359adfeb1ef4ea49a600b3bf06c0269 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/d359adfeb1ef4ea49a600b3bf06c0269 2024-11-20T19:26:33,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2fe20ae141974fd08af8e2ea8658bf1a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2fe20ae141974fd08af8e2ea8658bf1a 2024-11-20T19:26:33,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/de1d30685c014e76ad83a9cdbfe3287f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/de1d30685c014e76ad83a9cdbfe3287f 2024-11-20T19:26:33,356 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ff0bb47871324e71b0e33266dfaf947e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/ff0bb47871324e71b0e33266dfaf947e 2024-11-20T19:26:33,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T19:26:33,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1d5734ed25f84e01aa8612a39109ff76 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/1d5734ed25f84e01aa8612a39109ff76 2024-11-20T19:26:33,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/29aa7a1ad6984937b50153a349c50f74 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/29aa7a1ad6984937b50153a349c50f74 2024-11-20T19:26:33,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/234f45cbd4694bfca651233b8c7ca553 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/234f45cbd4694bfca651233b8c7ca553 2024-11-20T19:26:33,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7b22a3ddf8134ea9954af8155ac58047 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/7b22a3ddf8134ea9954af8155ac58047 2024-11-20T19:26:33,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/33b47e5fc8a5448f81602fae7c2015dc 2024-11-20T19:26:33,361 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8817965b906b4ce0becfd6a96bbaed1a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/8817965b906b4ce0becfd6a96bbaed1a 2024-11-20T19:26:33,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/992c56511cc14b63ac3575a516cc8acf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/992c56511cc14b63ac3575a516cc8acf 2024-11-20T19:26:33,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6221c04b13cf484594324639bee6cfdf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6221c04b13cf484594324639bee6cfdf 2024-11-20T19:26:33,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/958d92cccd5e4ecbb23a5ff86b8c1c93 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/958d92cccd5e4ecbb23a5ff86b8c1c93 2024-11-20T19:26:33,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38e9f11f6fec4427841a06c88a6d365e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38e9f11f6fec4427841a06c88a6d365e 2024-11-20T19:26:33,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/94fc1fc6a90f49a9811f4827a1465c52 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/94fc1fc6a90f49a9811f4827a1465c52 2024-11-20T19:26:33,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/fc346797f678432e9995cf6dcea011ec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/fc346797f678432e9995cf6dcea011ec 2024-11-20T19:26:33,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b0cf7ec338244d46a323046cda139370 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/b0cf7ec338244d46a323046cda139370 2024-11-20T19:26:33,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6d958047b7f047a0978d038c703ea14e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6d958047b7f047a0978d038c703ea14e 2024-11-20T19:26:33,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/3883f3ba9a5841c9b5f67c719c7dd4da to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/3883f3ba9a5841c9b5f67c719c7dd4da 2024-11-20T19:26:33,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/01a3904dc55a447a898f14e14f9887b3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/01a3904dc55a447a898f14e14f9887b3 2024-11-20T19:26:33,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6802b246f2c14bb8bc0310ebc8e4cfef to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/6802b246f2c14bb8bc0310ebc8e4cfef 2024-11-20T19:26:33,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/770c9c33c579478083328aeade57faea to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/770c9c33c579478083328aeade57faea 2024-11-20T19:26:33,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/c4fe629abad64ea485cf9769015ae1da to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/c4fe629abad64ea485cf9769015ae1da 2024-11-20T19:26:33,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38009ece6a6548c79fa95c573c4ee661 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/38009ece6a6548c79fa95c573c4ee661 2024-11-20T19:26:33,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/91d106d147164479a204b1fb493c7745 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/91d106d147164479a204b1fb493c7745 2024-11-20T19:26:33,378 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/recovered.edits/625.seqid, newMaxSeqId=625, maxSeqId=1 2024-11-20T19:26:33,379 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa. 
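[editorial sketch] The StoreCloser entries above show HBase's internal backup.HFileArchiver moving each compacted store file from the region's data directory to the parallel path under archive/. The helper below is only an illustration of that data -> archive path mapping, not the HBase implementation; the class name, root path, and file name are taken from the log purely as an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  /** Maps a store file under <root>/data/... to its location under <root>/archive/data/... */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // Relative part, e.g. default/TestAcidGuarantees/<region>/C/<hfile>
    String dataPrefix = new Path(rootDir, "data").toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(dataPrefix.length() + 1);
    return new Path(new Path(rootDir, "archive/data"), relative);
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path root = new Path("hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/34d05b6c713747e382779114be320326");
    Path archived = toArchivePath(root, storeFile);
    // After the store closes, the file should exist only under archive/, as logged by HFileArchiver(596)
    FileSystem fs = FileSystem.get(root.toUri(), conf);
    System.out.println(archived + " exists: " + fs.exists(archived));
  }
}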
2024-11-20T19:26:33,379 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1635): Region close journal for 05d8fd611fc9337dfa63e932920aeaaa: 2024-11-20T19:26:33,380 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(170): Closed 05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:33,380 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=05d8fd611fc9337dfa63e932920aeaaa, regionState=CLOSED 2024-11-20T19:26:33,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-20T19:26:33,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseRegionProcedure 05d8fd611fc9337dfa63e932920aeaaa, server=db9c3a6c6492,35979,1732130703276 in 278 msec 2024-11-20T19:26:33,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-11-20T19:26:33,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=05d8fd611fc9337dfa63e932920aeaaa, UNASSIGN in 280 msec 2024-11-20T19:26:33,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-20T19:26:33,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 282 msec 2024-11-20T19:26:33,385 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130793385"}]},"ts":"1732130793385"} 2024-11-20T19:26:33,386 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:26:33,392 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:26:33,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 340 msec 2024-11-20T19:26:33,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-20T19:26:33,657 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-20T19:26:33,657 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:26:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,659 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T19:26:33,659 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=91, 
state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,660 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:33,662 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/recovered.edits] 2024-11-20T19:26:33,664 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0f739000387a40389861ed1fa78ff217 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/0f739000387a40389861ed1fa78ff217 2024-11-20T19:26:33,665 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1d8daf814a1244b1967a49fce32c9e2e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/1d8daf814a1244b1967a49fce32c9e2e 2024-11-20T19:26:33,666 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2d728f44ca1f48a0a8ae2300193f121b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/A/2d728f44ca1f48a0a8ae2300193f121b 2024-11-20T19:26:33,668 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6a874434fe924191baddae2c6828b6d7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/6a874434fe924191baddae2c6828b6d7 2024-11-20T19:26:33,669 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/95ab6fc9b8704a86b72fdba19c5626cb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/95ab6fc9b8704a86b72fdba19c5626cb 2024-11-20T19:26:33,670 DEBUG 
[HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/cbf7b4355db4445fa7ef7d987744d4a2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/B/cbf7b4355db4445fa7ef7d987744d4a2 2024-11-20T19:26:33,671 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2af7867a938841d892cd1864d5d4d8c0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/2af7867a938841d892cd1864d5d4d8c0 2024-11-20T19:26:33,672 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/511e00f007da4de4ac3cf170147991df to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/511e00f007da4de4ac3cf170147991df 2024-11-20T19:26:33,674 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/813042e4866347098cbb3fa58f86c648 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/C/813042e4866347098cbb3fa58f86c648 2024-11-20T19:26:33,676 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/recovered.edits/625.seqid to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa/recovered.edits/625.seqid 2024-11-20T19:26:33,677 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/05d8fd611fc9337dfa63e932920aeaaa 2024-11-20T19:26:33,677 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:26:33,679 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=91, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,683 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:26:33,686 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-11-20T19:26:33,687 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=91, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,687 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:26:33,687 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130793687"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:33,689 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:26:33,689 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 05d8fd611fc9337dfa63e932920aeaaa, NAME => 'TestAcidGuarantees,,1732130767238.05d8fd611fc9337dfa63e932920aeaaa.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:26:33,689 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T19:26:33,689 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130793689"}]},"ts":"9223372036854775807"} 2024-11-20T19:26:33,691 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:26:33,701 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=91, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 43 msec 2024-11-20T19:26:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-20T19:26:33,760 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-20T19:26:33,769 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=240 (was 241), OpenFileDescriptor=455 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=694 (was 678) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3379 (was 3487) 2024-11-20T19:26:33,779 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=240, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=694, ProcessCount=11, AvailableMemoryMB=3378 2024-11-20T19:26:33,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
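[editorial sketch] The DISABLE (pid=87) and DELETE (pid=91) operations completed above are driven from the client side through the public Admin API. A minimal sketch of those calls, assuming a default client Configuration, is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        // A table must be disabled before it can be deleted (DisableTableProcedure, then DeleteTableProcedure)
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
    }
  }
}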
2024-11-20T19:26:33,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:26:33,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:33,781 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:26:33,782 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:33,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 92 2024-11-20T19:26:33,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:33,782 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:26:33,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742211_1387 (size=963) 2024-11-20T19:26:33,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:34,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:34,189 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:26:34,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742212_1388 (size=53) 2024-11-20T19:26:34,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:34,594 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:34,595 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing fea4de996b388410b192fd1511bd28fa, disabling compactions & flushes 2024-11-20T19:26:34,595 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:34,595 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:34,595 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. after waiting 0 ms 2024-11-20T19:26:34,595 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:34,595 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
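[editorial sketch] The table descriptor dumped for pid=92 uses three column families (A, B, C) with VERSIONS => '1' and the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is what later makes each store open a CompactingMemStore with compactor=ADAPTIVE. The log does not show the test's own builder code; the following is a hedged reconstruction using the public HBase 2.x descriptor builders, with the connection setup assumed:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder builder =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Matches METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the dumped descriptor
                .build());
      }
      admin.createTable(builder.build()); // triggers the CreateTableProcedure seen above
    }
  }
}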
2024-11-20T19:26:34,595 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:34,596 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:26:34,597 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130794596"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130794596"}]},"ts":"1732130794596"} 2024-11-20T19:26:34,598 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:26:34,600 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:26:34,600 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130794600"}]},"ts":"1732130794600"} 2024-11-20T19:26:34,601 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:26:34,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, ASSIGN}] 2024-11-20T19:26:34,619 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, ASSIGN 2024-11-20T19:26:34,620 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:26:34,770 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:34,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; OpenRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:26:34,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:34,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:34,925 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:34,925 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7285): Opening region: {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:26:34,925 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,925 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:34,925 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7327): checking encryption for fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,925 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7330): checking classloading for fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,926 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,927 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:34,928 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fea4de996b388410b192fd1511bd28fa columnFamilyName A 2024-11-20T19:26:34,928 DEBUG [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:34,928 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(327): Store=fea4de996b388410b192fd1511bd28fa/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:34,928 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,929 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:34,930 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fea4de996b388410b192fd1511bd28fa columnFamilyName B 2024-11-20T19:26:34,930 DEBUG [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:34,930 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(327): Store=fea4de996b388410b192fd1511bd28fa/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:34,930 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,931 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:34,931 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fea4de996b388410b192fd1511bd28fa columnFamilyName C 2024-11-20T19:26:34,931 DEBUG [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:34,931 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(327): Store=fea4de996b388410b192fd1511bd28fa/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:34,932 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:34,932 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,933 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,934 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:26:34,935 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1085): writing seq id for fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:34,937 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:26:34,937 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1102): Opened fea4de996b388410b192fd1511bd28fa; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67219265, jitterRate=0.001645103096961975}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:26:34,938 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1001): Region open journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:34,939 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., pid=94, masterSystemTime=1732130794922 2024-11-20T19:26:34,940 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:34,940 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:34,940 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:34,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-20T19:26:34,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; OpenRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 in 170 msec 2024-11-20T19:26:34,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-11-20T19:26:34,943 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, ASSIGN in 325 msec 2024-11-20T19:26:34,944 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:26:34,944 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130794944"}]},"ts":"1732130794944"} 2024-11-20T19:26:34,945 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:26:34,984 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:26:34,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2040 sec 2024-11-20T19:26:35,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-11-20T19:26:35,887 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-11-20T19:26:35,891 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df61dc9 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fe71801 2024-11-20T19:26:35,901 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf5e2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:35,902 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:35,903 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39772, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:35,904 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:26:35,905 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34336, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:26:35,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:26:35,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:26:35,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T19:26:35,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742213_1389 (size=999) 2024-11-20T19:26:36,324 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T19:26:36,324 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T19:26:36,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:26:36,334 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, REOPEN/MOVE}] 2024-11-20T19:26:36,334 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, REOPEN/MOVE 2024-11-20T19:26:36,335 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:36,335 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:26:36,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:26:36,487 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:36,487 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,488 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:26:36,488 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing fea4de996b388410b192fd1511bd28fa, disabling compactions & flushes 2024-11-20T19:26:36,488 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:36,488 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:36,488 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. after waiting 0 ms 2024-11-20T19:26:36,488 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
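The modify-table request logged above (pid=95) rewrites the TestAcidGuarantees descriptor so that column family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4') while families B and C stay unchanged, which is why the table's regions are then reopened. A minimal sketch of how such a modification can be issued through the standard HBase 2.x Admin API follows; the class name and connection setup are illustrative assumptions, and only the table name, family name, and MOB settings are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {   // illustrative class name, not from the test
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      TableDescriptor current = admin.getDescriptor(table);
      // Rebuild family 'A' with MOB enabled and a 4-byte threshold, matching the new descriptor above.
      ColumnFamilyDescriptor familyA = current.getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobFamilyA = ColumnFamilyDescriptorBuilder.newBuilder(familyA)
          .setMobEnabled(true)     // IS_MOB => 'true'
          .setMobThreshold(4L)     // MOB_THRESHOLD => '4'
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobFamilyA)
          .build();
      // A call like this is what shows up as the ModifyTableProcedure (pid=95) and the
      // ReopenTableRegionsProcedure / region close-reopen cycle recorded in the log.
      admin.modifyTable(modified);
    }
  }
}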
2024-11-20T19:26:36,492 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T19:26:36,492 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:36,492 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:36,492 WARN [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionServer(3786): Not adding moved region record: fea4de996b388410b192fd1511bd28fa to self. 2024-11-20T19:26:36,494 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,494 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=CLOSED 2024-11-20T19:26:36,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-20T19:26:36,497 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 in 160 msec 2024-11-20T19:26:36,497 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, REOPEN/MOVE; state=CLOSED, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=true 2024-11-20T19:26:36,648 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:36,650 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE; OpenRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:26:36,803 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:36,810 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:36,810 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7285): Opening region: {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:26:36,811 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,812 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:26:36,812 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7327): checking encryption for fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,812 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7330): checking classloading for fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,814 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,815 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:36,815 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fea4de996b388410b192fd1511bd28fa columnFamilyName A 2024-11-20T19:26:36,817 DEBUG [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:36,818 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(327): Store=fea4de996b388410b192fd1511bd28fa/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:36,818 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,819 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:36,819 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fea4de996b388410b192fd1511bd28fa columnFamilyName B 2024-11-20T19:26:36,819 DEBUG [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:36,820 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(327): Store=fea4de996b388410b192fd1511bd28fa/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:36,820 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,821 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:26:36,821 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fea4de996b388410b192fd1511bd28fa columnFamilyName C 2024-11-20T19:26:36,821 DEBUG [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:36,821 INFO [StoreOpener-fea4de996b388410b192fd1511bd28fa-1 {}] regionserver.HStore(327): Store=fea4de996b388410b192fd1511bd28fa/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:26:36,822 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:36,823 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,824 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,826 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:26:36,827 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1085): writing seq id for fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,828 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1102): Opened fea4de996b388410b192fd1511bd28fa; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71807725, jitterRate=0.07001848518848419}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:26:36,828 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1001): Region open journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:36,829 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., pid=99, masterSystemTime=1732130796803 2024-11-20T19:26:36,830 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:36,830 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:36,831 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=OPEN, openSeqNum=5, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:36,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=97 2024-11-20T19:26:36,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=97, state=SUCCESS; OpenRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 in 181 msec 2024-11-20T19:26:36,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-11-20T19:26:36,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, REOPEN/MOVE in 498 msec 2024-11-20T19:26:36,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-20T19:26:36,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 503 msec 2024-11-20T19:26:36,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 926 msec 2024-11-20T19:26:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-11-20T19:26:36,837 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3637e4c6 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51f7d511 2024-11-20T19:26:36,904 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75b14fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,905 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f422b4 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc42ea6 2024-11-20T19:26:36,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f74604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,918 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-11-20T19:26:36,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,927 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f472e0 to 
127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-11-20T19:26:36,934 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,935 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-11-20T19:26:36,943 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,943 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-11-20T19:26:36,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,952 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2205f666 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27539bdc 2024-11-20T19:26:36,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c907e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,961 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-11-20T19:26:36,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,969 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-11-20T19:26:36,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,977 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x787e5169 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7284f16d 2024-11-20T19:26:36,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47679076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:26:36,990 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:36,990 DEBUG [hconnection-0x63e810b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,991 DEBUG [hconnection-0x5d05dd49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-11-20T19:26:36,991 DEBUG [hconnection-0xcc57515-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,992 DEBUG [hconnection-0x77feb63b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,992 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:36,992 DEBUG [hconnection-0x7e784bc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,992 DEBUG [hconnection-0x195370e5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,992 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,992 DEBUG [hconnection-0x2270e746-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,992 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,993 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:36,993 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:36,993 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,993 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,993 DEBUG [hconnection-0xf12ca5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,993 DEBUG [hconnection-0x7396aebe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,994 DEBUG [hconnection-0x5c5fb5df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:26:36,994 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,995 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,995 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,995 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39818, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:36,996 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39848, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,996 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:26:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:36,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:26:36,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:36,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:36,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:36,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:36,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:36,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:37,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130857017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130857022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209926ad479efe404e84cbc1aa4690f771_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130796998/Put/seqid=0 2024-11-20T19:26:37,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130857023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130857024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742214_1390 (size=12154) 2024-11-20T19:26:37,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130857025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,033 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:37,036 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209926ad479efe404e84cbc1aa4690f771_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209926ad479efe404e84cbc1aa4690f771_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:37,037 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/8289b3c96c454d81bfbb1fe806c7a026, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:37,037 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/8289b3c96c454d81bfbb1fe806c7a026 is 175, key is test_row_0/A:col10/1732130796998/Put/seqid=0 2024-11-20T19:26:37,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742215_1391 (size=30955) 2024-11-20T19:26:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:37,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130857125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130857126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130857129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130857129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130857132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,145 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:37,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
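The repeated RegionTooBusyException entries above report a blocking limit of 512.0 K for region fea4de996b388410b192fd1511bd28fa. That figure is consistent with the table's MEMSTORE_FLUSHSIZE of 131072 bytes (flagged earlier by TableDescriptorChecker as too small) multiplied by hbase.hregion.memstore.block.multiplier, which defaults to 4; treat the multiplier value as an assumption, since the test's site configuration is not shown in this excerpt.

public class MemstoreBlockingLimit {           // illustrative class name
  public static void main(String[] args) {
    long memstoreFlushSize = 131072L;          // MEMSTORE_FLUSHSIZE from the table descriptor (logged above)
    int blockMultiplier = 4;                   // hbase.hregion.memstore.block.multiplier default (assumption)
    long blockingLimitBytes = memstoreFlushSize * blockMultiplier;
    System.out.println(blockingLimitBytes);    // 524288 bytes = 512.0 K, matching "Over memstore limit=512.0 K"
  }
}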
2024-11-20T19:26:37,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:37,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:37,299 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:37,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:37,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:37,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130857329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130857330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130857331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130857331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130857335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,445 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/8289b3c96c454d81bfbb1fe806c7a026 2024-11-20T19:26:37,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:37,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:37,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:37,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1e827f2dfecb49eaacaa1ef2dc970786 is 50, key is test_row_0/B:col10/1732130796998/Put/seqid=0 2024-11-20T19:26:37,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742216_1392 (size=12001) 2024-11-20T19:26:37,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:37,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:37,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:37,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:37,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130857633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130857634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130857634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130857635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:37,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130857640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,758 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:37,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:37,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1e827f2dfecb49eaacaa1ef2dc970786 2024-11-20T19:26:37,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/98e07125fddb48cfaed880a446739450 is 50, key is test_row_0/C:col10/1732130796998/Put/seqid=0 2024-11-20T19:26:37,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742217_1393 (size=12001) 2024-11-20T19:26:37,910 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:37,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:37,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
as already flushing 2024-11-20T19:26:37,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:37,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:37,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,063 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:38,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:38,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:38,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:38,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-11-20T19:26:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130858138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130858138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130858139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130858141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130858143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,215 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:38,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:38,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:38,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:38,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/98e07125fddb48cfaed880a446739450 2024-11-20T19:26:38,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/8289b3c96c454d81bfbb1fe806c7a026 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026 2024-11-20T19:26:38,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026, entries=150, sequenceid=16, filesize=30.2 K 2024-11-20T19:26:38,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1e827f2dfecb49eaacaa1ef2dc970786 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1e827f2dfecb49eaacaa1ef2dc970786 2024-11-20T19:26:38,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1e827f2dfecb49eaacaa1ef2dc970786, entries=150, sequenceid=16, 
filesize=11.7 K 2024-11-20T19:26:38,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/98e07125fddb48cfaed880a446739450 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/98e07125fddb48cfaed880a446739450 2024-11-20T19:26:38,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/98e07125fddb48cfaed880a446739450, entries=150, sequenceid=16, filesize=11.7 K 2024-11-20T19:26:38,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for fea4de996b388410b192fd1511bd28fa in 1325ms, sequenceid=16, compaction requested=false 2024-11-20T19:26:38,323 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-20T19:26:38,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:38,368 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:38,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:38,369 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:38,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:38,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120901a44d3f09c42bbbec154786dd77343_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130797023/Put/seqid=0 2024-11-20T19:26:38,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742218_1394 (size=12154) 2024-11-20T19:26:38,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,383 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120901a44d3f09c42bbbec154786dd77343_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120901a44d3f09c42bbbec154786dd77343_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:38,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/64e1d715b2a64b4c857dd35300bf0468, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:38,384 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/64e1d715b2a64b4c857dd35300bf0468 is 175, key is test_row_0/A:col10/1732130797023/Put/seqid=0 2024-11-20T19:26:38,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742219_1395 (size=30955) 2024-11-20T19:26:38,803 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/64e1d715b2a64b4c857dd35300bf0468 2024-11-20T19:26:38,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/74c2b6bc48bf41498e12d917cf7eb4ac is 50, key is test_row_0/B:col10/1732130797023/Put/seqid=0 2024-11-20T19:26:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742220_1396 (size=12001) 2024-11-20T19:26:38,813 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/74c2b6bc48bf41498e12d917cf7eb4ac 2024-11-20T19:26:38,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/00c32907460e4b9a9e4040ec9521daa4 is 50, key is test_row_0/C:col10/1732130797023/Put/seqid=0 2024-11-20T19:26:38,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742221_1397 (size=12001) 2024-11-20T19:26:38,825 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/00c32907460e4b9a9e4040ec9521daa4 2024-11-20T19:26:38,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/64e1d715b2a64b4c857dd35300bf0468 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468 2024-11-20T19:26:38,832 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468, entries=150, sequenceid=41, filesize=30.2 K 2024-11-20T19:26:38,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/74c2b6bc48bf41498e12d917cf7eb4ac as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/74c2b6bc48bf41498e12d917cf7eb4ac 2024-11-20T19:26:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,835 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/74c2b6bc48bf41498e12d917cf7eb4ac, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T19:26:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/00c32907460e4b9a9e4040ec9521daa4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/00c32907460e4b9a9e4040ec9521daa4 2024-11-20T19:26:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/00c32907460e4b9a9e4040ec9521daa4, entries=150, sequenceid=41, filesize=11.7 K 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,840 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for fea4de996b388410b192fd1511bd28fa in 471ms, sequenceid=41, compaction requested=false 2024-11-20T19:26:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:38,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:38,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-20T19:26:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-11-20T19:26:38,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-11-20T19:26:38,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8480 sec 2024-11-20T19:26:38,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 1.8520 sec 2024-11-20T19:26:38,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG records repeat continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (port 35979) between 2024-11-20T19:26:38,888 and 2024-11-20T19:26:38,950; the only distinct record in this span is the WARN below ...]
2024-11-20T19:26:38,911 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-20T19:26:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:38,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries omitted: storefiletracker.StoreFileTrackerFactory(122) "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=35979) between 2024-11-20T19:26:39,018 and 2024-11-20T19:26:39,064 ...]
2024-11-20T19:26:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-11-20T19:26:39,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100
2024-11-20T19:26:39,099 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed
2024-11-20T19:26:39,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:26:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees
2024-11-20T19:26:39,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102
2024-11-20T19:26:39,102 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:26:39,103 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:26:39,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T19:26:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102
2024-11-20T19:26:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:39,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa
2024-11-20T19:26:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T19:26:39,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A
2024-11-20T19:26:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:39,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B
2024-11-20T19:26:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:39,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C
2024-11-20T19:26:39,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,232 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200ade6be11ca54bd7b75dd062032e3a50_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130799212/Put/seqid=0 2024-11-20T19:26:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:39,254 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103
2024-11-20T19:26:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing
2024-11-20T19:26:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:39,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:39,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=103
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,265 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742223_1399 (size=24358) 2024-11-20T19:26:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,270 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,276 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200ade6be11ca54bd7b75dd062032e3a50_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200ade6be11ca54bd7b75dd062032e3a50_fea4de996b388410b192fd1511bd28fa
2024-11-20T19:26:39,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,277 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c8e1b132fd874ba087248d32f959688b, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa]
2024-11-20T19:26:39,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c8e1b132fd874ba087248d32f959688b is 175, key is test_row_0/A:col10/1732130799212/Put/seqid=0
2024-11-20T19:26:39,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742222_1398 (size=73994)
2024-11-20T19:26:39,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,279 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c8e1b132fd874ba087248d32f959688b 2024-11-20T19:26:39,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,286 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:39,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/41e1fe1594f845bc91334a2c01cfe9ad is 50, key is test_row_0/B:col10/1732130799212/Put/seqid=0
2024-11-20T19:26:39,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742224_1400 (size=23705)
2024-11-20T19:26:39,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130859281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130859288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:39,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130859291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130859294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130859293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130859397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:39,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130859407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:39,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:39,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:39,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:39,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:39,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130859410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130859411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130859412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,563 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:39,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:39,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130859602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130859610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130859617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130859617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130859617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/41e1fe1594f845bc91334a2c01cfe9ad 2024-11-20T19:26:39,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/eefd6d69572c4090af99b5b701a9f2d0 is 50, key is test_row_0/C:col10/1732130799212/Put/seqid=0 2024-11-20T19:26:39,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:39,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742225_1401 (size=12001) 2024-11-20T19:26:39,715 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:39,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:39,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:39,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:39,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:39,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:39,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:39,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
as already flushing 2024-11-20T19:26:39,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:39,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:39,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130859906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130859915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130859923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130859924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:39,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:39,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130859925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,019 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:40,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:40,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/eefd6d69572c4090af99b5b701a9f2d0 2024-11-20T19:26:40,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c8e1b132fd874ba087248d32f959688b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b 2024-11-20T19:26:40,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b, entries=400, sequenceid=52, filesize=72.3 K 2024-11-20T19:26:40,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/41e1fe1594f845bc91334a2c01cfe9ad as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/41e1fe1594f845bc91334a2c01cfe9ad 2024-11-20T19:26:40,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/41e1fe1594f845bc91334a2c01cfe9ad, entries=400, sequenceid=52, 
filesize=23.1 K 2024-11-20T19:26:40,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/eefd6d69572c4090af99b5b701a9f2d0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/eefd6d69572c4090af99b5b701a9f2d0 2024-11-20T19:26:40,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/eefd6d69572c4090af99b5b701a9f2d0, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T19:26:40,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for fea4de996b388410b192fd1511bd28fa in 891ms, sequenceid=52, compaction requested=true 2024-11-20T19:26:40,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:40,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:40,121 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:40,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:40,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:40,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:40,121 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:40,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:40,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:40,121 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:40,121 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 135904 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:40,121 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 
fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:40,121 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:40,121 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:40,121 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:40,122 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1e827f2dfecb49eaacaa1ef2dc970786, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/74c2b6bc48bf41498e12d917cf7eb4ac, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/41e1fe1594f845bc91334a2c01cfe9ad] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=46.6 K 2024-11-20T19:26:40,122 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=132.7 K 2024-11-20T19:26:40,122 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b] 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e827f2dfecb49eaacaa1ef2dc970786, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732130796995 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8289b3c96c454d81bfbb1fe806c7a026, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732130796995 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 74c2b6bc48bf41498e12d917cf7eb4ac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130797021 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64e1d715b2a64b4c857dd35300bf0468, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130797021 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 41e1fe1594f845bc91334a2c01cfe9ad, keycount=400, bloomtype=ROW, size=23.1 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130799208 2024-11-20T19:26:40,122 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8e1b132fd874ba087248d32f959688b, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130799208 2024-11-20T19:26:40,128 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:40,129 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:40,129 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/9e3a00a20b2d493d98bfc728013ae654 is 50, key is test_row_0/B:col10/1732130799212/Put/seqid=0 2024-11-20T19:26:40,130 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411204ffbcdbfaf5d4ecd8f94c5a4e0ae5c7a_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:40,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742226_1402 (size=12104) 2024-11-20T19:26:40,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411204ffbcdbfaf5d4ecd8f94c5a4e0ae5c7a_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:40,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204ffbcdbfaf5d4ecd8f94c5a4e0ae5c7a_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:40,137 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/9e3a00a20b2d493d98bfc728013ae654 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9e3a00a20b2d493d98bfc728013ae654 2024-11-20T19:26:40,141 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into 9e3a00a20b2d493d98bfc728013ae654(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:40,141 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:40,141 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=13, startTime=1732130800121; duration=0sec 2024-11-20T19:26:40,141 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:40,141 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:40,141 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:40,142 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:40,142 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:40,142 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:40,142 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/98e07125fddb48cfaed880a446739450, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/00c32907460e4b9a9e4040ec9521daa4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/eefd6d69572c4090af99b5b701a9f2d0] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=35.2 K 2024-11-20T19:26:40,142 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 98e07125fddb48cfaed880a446739450, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732130796995 2024-11-20T19:26:40,143 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 00c32907460e4b9a9e4040ec9521daa4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732130797021 2024-11-20T19:26:40,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742227_1403 (size=4469) 2024-11-20T19:26:40,144 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting eefd6d69572c4090af99b5b701a9f2d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=52, earliestPutTs=1732130799212 2024-11-20T19:26:40,145 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#343 average throughput is 1.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:40,145 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/0b415765e8a9485cb97b3385bc5ac1fd is 175, key is test_row_0/A:col10/1732130799212/Put/seqid=0 2024-11-20T19:26:40,151 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#344 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:40,151 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c0d873304ab04c13a1020b0c18e95e50 is 50, key is test_row_0/C:col10/1732130799212/Put/seqid=0 2024-11-20T19:26:40,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742228_1404 (size=31058) 2024-11-20T19:26:40,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742229_1405 (size=12104) 2024-11-20T19:26:40,158 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/0b415765e8a9485cb97b3385bc5ac1fd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/0b415765e8a9485cb97b3385bc5ac1fd 2024-11-20T19:26:40,163 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into 0b415765e8a9485cb97b3385bc5ac1fd(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:40,163 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:40,163 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=13, startTime=1732130800120; duration=0sec 2024-11-20T19:26:40,163 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:40,163 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:40,166 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c0d873304ab04c13a1020b0c18e95e50 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c0d873304ab04c13a1020b0c18e95e50 2024-11-20T19:26:40,170 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into c0d873304ab04c13a1020b0c18e95e50(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:40,170 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:40,170 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=13, startTime=1732130800121; duration=0sec 2024-11-20T19:26:40,170 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:40,170 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:40,172 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-20T19:26:40,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:40,172 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:40,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:40,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120812f59108f76458cad3dd75eac0d421f_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130799285/Put/seqid=0 2024-11-20T19:26:40,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:40,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742230_1406 (size=12154) 2024-11-20T19:26:40,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:40,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:40,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130860431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130860432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130860432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130860432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130860435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130860533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130860536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130860538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:40,640 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120812f59108f76458cad3dd75eac0d421f_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120812f59108f76458cad3dd75eac0d421f_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:40,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f396d3c88a1c471bbce12f73f56991e4, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:40,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f396d3c88a1c471bbce12f73f56991e4 is 175, key is test_row_0/A:col10/1732130799285/Put/seqid=0 2024-11-20T19:26:40,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742231_1407 (size=30955) 2024-11-20T19:26:40,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130860737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130860740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:40,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130860742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,045 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f396d3c88a1c471bbce12f73f56991e4 2024-11-20T19:26:41,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130861046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130861046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130861046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/8f6653cb4d774e7cbf57d04b0c36258b is 50, key is test_row_0/B:col10/1732130799285/Put/seqid=0 2024-11-20T19:26:41,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742232_1408 (size=12001) 2024-11-20T19:26:41,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:41,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130861439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130861439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,455 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/8f6653cb4d774e7cbf57d04b0c36258b 2024-11-20T19:26:41,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/9bba6115a6ec46cc819f34f12de7991c is 50, key is test_row_0/C:col10/1732130799285/Put/seqid=0 2024-11-20T19:26:41,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742233_1409 (size=12001) 2024-11-20T19:26:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130861550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130861552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:41,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130861553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:41,863 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/9bba6115a6ec46cc819f34f12de7991c 2024-11-20T19:26:41,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f396d3c88a1c471bbce12f73f56991e4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4 2024-11-20T19:26:41,870 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4, entries=150, sequenceid=80, filesize=30.2 K 2024-11-20T19:26:41,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/8f6653cb4d774e7cbf57d04b0c36258b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8f6653cb4d774e7cbf57d04b0c36258b 2024-11-20T19:26:41,875 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8f6653cb4d774e7cbf57d04b0c36258b, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:26:41,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/9bba6115a6ec46cc819f34f12de7991c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9bba6115a6ec46cc819f34f12de7991c 2024-11-20T19:26:41,879 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9bba6115a6ec46cc819f34f12de7991c, entries=150, sequenceid=80, filesize=11.7 K 2024-11-20T19:26:41,891 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for fea4de996b388410b192fd1511bd28fa in 1719ms, sequenceid=80, compaction requested=false 2024-11-20T19:26:41,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:41,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:41,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-20T19:26:41,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-20T19:26:41,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-20T19:26:41,893 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7890 sec 2024-11-20T19:26:41,894 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.7920 sec 2024-11-20T19:26:42,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:42,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:26:42,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:42,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:42,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e31fded4768e462c8b461329b40a848a_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:42,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742234_1410 (size=14594) 2024-11-20T19:26:42,594 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:42,597 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120e31fded4768e462c8b461329b40a848a_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e31fded4768e462c8b461329b40a848a_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:42,598 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f8cea79454954cc189dc588be536078b, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:42,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f8cea79454954cc189dc588be536078b is 175, key is test_row_0/A:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:42,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742235_1411 (size=39549) 2024-11-20T19:26:42,606 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f8cea79454954cc189dc588be536078b 2024-11-20T19:26:42,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/f4c85bab2d5f4c839afeb40d0e43b1de is 50, key is test_row_0/B:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:42,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130862617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130862618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130862618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742236_1412 (size=12001) 2024-11-20T19:26:42,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/f4c85bab2d5f4c839afeb40d0e43b1de 2024-11-20T19:26:42,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/34b05360040e42b794b823f7b88be3ec is 50, key is test_row_0/C:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:42,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742237_1413 (size=12001) 2024-11-20T19:26:42,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130862725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,732 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130862725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130862728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130862934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130862934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:42,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:42,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130862935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/34b05360040e42b794b823f7b88be3ec 2024-11-20T19:26:43,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f8cea79454954cc189dc588be536078b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b 2024-11-20T19:26:43,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b, entries=200, sequenceid=92, filesize=38.6 K 2024-11-20T19:26:43,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/f4c85bab2d5f4c839afeb40d0e43b1de as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/f4c85bab2d5f4c839afeb40d0e43b1de 2024-11-20T19:26:43,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/f4c85bab2d5f4c839afeb40d0e43b1de, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:26:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/34b05360040e42b794b823f7b88be3ec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/34b05360040e42b794b823f7b88be3ec 2024-11-20T19:26:43,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/34b05360040e42b794b823f7b88be3ec, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:26:43,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for fea4de996b388410b192fd1511bd28fa in 536ms, sequenceid=92, compaction requested=true 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:43,098 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:43,098 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:43,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:43,099 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:43,099 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,099 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9e3a00a20b2d493d98bfc728013ae654, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8f6653cb4d774e7cbf57d04b0c36258b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/f4c85bab2d5f4c839afeb40d0e43b1de] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=35.3 K 2024-11-20T19:26:43,099 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/0b415765e8a9485cb97b3385bc5ac1fd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=99.2 K 2024-11-20T19:26:43,099 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/0b415765e8a9485cb97b3385bc5ac1fd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b] 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e3a00a20b2d493d98bfc728013ae654, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130799212 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b415765e8a9485cb97b3385bc5ac1fd, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130799212 2024-11-20T19:26:43,099 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f6653cb4d774e7cbf57d04b0c36258b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130799285 2024-11-20T19:26:43,100 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f396d3c88a1c471bbce12f73f56991e4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130799285 2024-11-20T19:26:43,100 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f4c85bab2d5f4c839afeb40d0e43b1de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130800431 2024-11-20T19:26:43,100 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8cea79454954cc189dc588be536078b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130800430 2024-11-20T19:26:43,113 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:43,113 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#351 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:43,114 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/8b111285a92b40be84016a7bf68824b2 is 50, key is test_row_0/B:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:43,114 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112032a4cfc8ba6a4d7d99ae40a5a9b69603_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:43,116 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112032a4cfc8ba6a4d7d99ae40a5a9b69603_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:43,116 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112032a4cfc8ba6a4d7d99ae40a5a9b69603_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:43,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742238_1414 (size=12207) 2024-11-20T19:26:43,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742239_1415 (size=4469) 2024-11-20T19:26:43,120 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#352 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:43,120 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d15ce8b2fb7f4951a1bd0b7dd356786c is 175, key is test_row_0/A:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:43,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742240_1416 (size=31161) 2024-11-20T19:26:43,129 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d15ce8b2fb7f4951a1bd0b7dd356786c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d15ce8b2fb7f4951a1bd0b7dd356786c 2024-11-20T19:26:43,134 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into d15ce8b2fb7f4951a1bd0b7dd356786c(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:43,134 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:43,135 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=13, startTime=1732130803098; duration=0sec 2024-11-20T19:26:43,135 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:43,135 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:43,135 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:43,138 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:43,138 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:43,138 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:43,138 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c0d873304ab04c13a1020b0c18e95e50, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9bba6115a6ec46cc819f34f12de7991c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/34b05360040e42b794b823f7b88be3ec] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=35.3 K 2024-11-20T19:26:43,138 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0d873304ab04c13a1020b0c18e95e50, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130799212 2024-11-20T19:26:43,139 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bba6115a6ec46cc819f34f12de7991c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1732130799285 2024-11-20T19:26:43,139 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34b05360040e42b794b823f7b88be3ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130800431 2024-11-20T19:26:43,143 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:43,144 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/e6a4bb766b0e4debb7353c15a15c9286 is 50, key is test_row_0/C:col10/1732130800431/Put/seqid=0 2024-11-20T19:26:43,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742241_1417 (size=12207) 2024-11-20T19:26:43,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-20T19:26:43,207 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-20T19:26:43,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-20T19:26:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:43,209 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:43,210 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:43,210 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:43,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:43,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:43,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:43,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:43,245 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-20T19:26:43,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dda8839828ab4ea4ac5b5391f6be06a3_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130802617/Put/seqid=0 2024-11-20T19:26:43,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742242_1418 (size=17034) 2024-11-20T19:26:43,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130863259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130863260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130863260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:43,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T19:26:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:43,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130863362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130863368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130863368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130863448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,450 DEBUG [Thread-1740 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., hostname=db9c3a6c6492,35979,1732130703276, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:26:43,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130863454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,457 DEBUG [Thread-1744 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., hostname=db9c3a6c6492,35979,1732130703276, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:26:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:43,514 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T19:26:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:43,523 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/8b111285a92b40be84016a7bf68824b2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8b111285a92b40be84016a7bf68824b2 2024-11-20T19:26:43,526 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into 8b111285a92b40be84016a7bf68824b2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:43,527 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:43,527 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=13, startTime=1732130803098; duration=0sec 2024-11-20T19:26:43,527 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:43,527 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:43,556 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/e6a4bb766b0e4debb7353c15a15c9286 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/e6a4bb766b0e4debb7353c15a15c9286 2024-11-20T19:26:43,561 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into e6a4bb766b0e4debb7353c15a15c9286(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:43,561 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:43,561 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=13, startTime=1732130803098; duration=0sec 2024-11-20T19:26:43,561 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:43,561 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:43,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130863568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130863574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130863575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,658 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:43,661 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dda8839828ab4ea4ac5b5391f6be06a3_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dda8839828ab4ea4ac5b5391f6be06a3_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:43,662 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/cf365753500a44e0894f7a423b730d66, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:43,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/cf365753500a44e0894f7a423b730d66 is 175, key is test_row_0/A:col10/1732130802617/Put/seqid=0 2024-11-20T19:26:43,667 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T19:26:43,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
as already flushing 2024-11-20T19:26:43,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:43,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742243_1419 (size=48139) 2024-11-20T19:26:43,695 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/cf365753500a44e0894f7a423b730d66 2024-11-20T19:26:43,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/693b25271063405fa697fd4c3bbd1242 is 50, key is test_row_0/B:col10/1732130802617/Put/seqid=0 2024-11-20T19:26:43,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742244_1420 (size=12001) 2024-11-20T19:26:43,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/693b25271063405fa697fd4c3bbd1242 2024-11-20T19:26:43,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/cc6d12b5528a4be2a87fba1282558282 is 50, key is test_row_0/C:col10/1732130802617/Put/seqid=0 2024-11-20T19:26:43,754 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742245_1421 (size=12001) 2024-11-20T19:26:43,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/cc6d12b5528a4be2a87fba1282558282 2024-11-20T19:26:43,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/cf365753500a44e0894f7a423b730d66 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66 2024-11-20T19:26:43,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66, entries=250, sequenceid=119, filesize=47.0 K 2024-11-20T19:26:43,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/693b25271063405fa697fd4c3bbd1242 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/693b25271063405fa697fd4c3bbd1242 2024-11-20T19:26:43,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/693b25271063405fa697fd4c3bbd1242, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T19:26:43,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/cc6d12b5528a4be2a87fba1282558282 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/cc6d12b5528a4be2a87fba1282558282 2024-11-20T19:26:43,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/cc6d12b5528a4be2a87fba1282558282, entries=150, sequenceid=119, filesize=11.7 K 2024-11-20T19:26:43,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for fea4de996b388410b192fd1511bd28fa in 541ms, sequenceid=119, compaction requested=false 2024-11-20T19:26:43,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:43,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:43,820 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-20T19:26:43,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:43,820 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:26:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:43,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112035cf29082dc146e399fc26585cd48f70_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130803259/Put/seqid=0 2024-11-20T19:26:43,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742246_1422 (size=12154) 2024-11-20T19:26:43,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:43,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:43,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130863955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130863957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:43,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:43,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130863957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130864066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130864067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130864068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:44,265 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112035cf29082dc146e399fc26585cd48f70_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112035cf29082dc146e399fc26585cd48f70_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:44,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a7b44f8407c34845ad23b46dc8e81807, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:44,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a7b44f8407c34845ad23b46dc8e81807 is 175, key is test_row_0/A:col10/1732130803259/Put/seqid=0 2024-11-20T19:26:44,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130864276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130864277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130864277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742247_1423 (size=30955) 2024-11-20T19:26:44,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-20T19:26:44,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130864585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130864585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:44,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130864587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:44,709 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a7b44f8407c34845ad23b46dc8e81807 2024-11-20T19:26:44,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/0b6622d32c8940c2a0b357cca45f1794 is 50, key is test_row_0/B:col10/1732130803259/Put/seqid=0 2024-11-20T19:26:44,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742248_1424 (size=12001) 2024-11-20T19:26:44,796 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/0b6622d32c8940c2a0b357cca45f1794 2024-11-20T19:26:44,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/d75ce22f6cb54b2589ca7038d312bb8e is 50, key is test_row_0/C:col10/1732130803259/Put/seqid=0 2024-11-20T19:26:44,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742249_1425 (size=12001) 2024-11-20T19:26:45,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:45,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130865094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:45,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:45,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130865096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:45,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:45,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130865102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:45,265 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/d75ce22f6cb54b2589ca7038d312bb8e
2024-11-20T19:26:45,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a7b44f8407c34845ad23b46dc8e81807 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807
2024-11-20T19:26:45,274 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807, entries=150, sequenceid=131, filesize=30.2 K
2024-11-20T19:26:45,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/0b6622d32c8940c2a0b357cca45f1794 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0b6622d32c8940c2a0b357cca45f1794
2024-11-20T19:26:45,294 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0b6622d32c8940c2a0b357cca45f1794, entries=150, sequenceid=131, filesize=11.7 K
2024-11-20T19:26:45,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/d75ce22f6cb54b2589ca7038d312bb8e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d75ce22f6cb54b2589ca7038d312bb8e
2024-11-20T19:26:45,298 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d75ce22f6cb54b2589ca7038d312bb8e, entries=150, sequenceid=131, filesize=11.7 K
2024-11-20T19:26:45,299 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for fea4de996b388410b192fd1511bd28fa in 1479ms, sequenceid=131, compaction requested=true
2024-11-20T19:26:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa:
2024-11-20T19:26:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
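[Editor's note] The RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the memstore of region fea4de996b388410b192fd1511bd28fa is over its blocking limit (512.0 K in this test run); the flush that follows (pid=105) brings the region back under the limit. On the client side these rejections are retriable. The sketch below is a minimal illustration, not code from this test: the table, family, and qualifier names are taken from the log, but the retry and pause values are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriedPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: give the client enough retry budget to ride out
    // RegionTooBusyException while the server flushes the memstore.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 200); // ms between retries

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put); // retried internally; may still fail once retries are exhausted
      } catch (RegionTooBusyException e) {
        // The region stayed over its memstore limit for the whole retry budget.
        System.err.println("Write rejected, region still too busy: " + e.getMessage());
      }
    }
  }
}
```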
2024-11-20T19:26:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105
2024-11-20T19:26:45,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=105
2024-11-20T19:26:45,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104
2024-11-20T19:26:45,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0910 sec
2024-11-20T19:26:45,304 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.0950 sec
2024-11-20T19:26:45,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-11-20T19:26:45,313 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed
2024-11-20T19:26:45,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:26:45,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees
2024-11-20T19:26:45,316 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:26:45,316 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:26:45,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T19:26:45,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-20T19:26:45,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-20T19:26:45,469 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:45,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-20T19:26:45,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
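[Editor's note] The entries above show one admin-requested flush completing (procId 104) and the next one being stored as FlushTableProcedure pid=106 with a FlushRegionProcedure child pid=107, dispatched to the region server. This is the server-side trail left by a client calling the Admin flush API; the sketch below is a hedged illustration of that call, not the actual TestAcidGuarantees test code, which is not visible in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous from the caller's point of view: the client keeps polling the
      // master ("Checking to see if procedure is done pid=...") until the
      // FlushTableProcedure and its FlushRegionProcedure children finish.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```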
2024-11-20T19:26:45,469 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:26:45,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:45,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:45,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:45,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:45,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:45,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:45,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120492663a30d0649699fdf13b94d50db1a_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130803955/Put/seqid=0 2024-11-20T19:26:45,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742250_1426 (size=12304) 2024-11-20T19:26:45,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,524 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120492663a30d0649699fdf13b94d50db1a_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120492663a30d0649699fdf13b94d50db1a_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:45,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c9873e94cfc142fb99b30cb6d897c4d6, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:45,526 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c9873e94cfc142fb99b30cb6d897c4d6 is 175, key is test_row_0/A:col10/1732130803955/Put/seqid=0 2024-11-20T19:26:45,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742251_1427 (size=31105) 2024-11-20T19:26:45,541 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c9873e94cfc142fb99b30cb6d897c4d6 2024-11-20T19:26:45,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/d621885a67f745e6ad5ab668ced4a843 is 50, key is test_row_0/B:col10/1732130803955/Put/seqid=0 2024-11-20T19:26:45,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742252_1428 (size=12151) 2024-11-20T19:26:45,589 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/d621885a67f745e6ad5ab668ced4a843 2024-11-20T19:26:45,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/84e0f7404b7a40dcacd05bf5949fc9a5 is 50, key is test_row_0/C:col10/1732130803955/Put/seqid=0 2024-11-20T19:26:45,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-20T19:26:45,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742253_1429 (size=12151) 2024-11-20T19:26:45,633 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/84e0f7404b7a40dcacd05bf5949fc9a5 2024-11-20T19:26:45,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c9873e94cfc142fb99b30cb6d897c4d6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6 2024-11-20T19:26:45,643 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6, entries=150, sequenceid=157, filesize=30.4 K 2024-11-20T19:26:45,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/d621885a67f745e6ad5ab668ced4a843 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/d621885a67f745e6ad5ab668ced4a843 2024-11-20T19:26:45,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,651 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/d621885a67f745e6ad5ab668ced4a843, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T19:26:45,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/84e0f7404b7a40dcacd05bf5949fc9a5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/84e0f7404b7a40dcacd05bf5949fc9a5 2024-11-20T19:26:45,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,653 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,656 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/84e0f7404b7a40dcacd05bf5949fc9a5, entries=150, sequenceid=157, filesize=11.9 K 2024-11-20T19:26:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=0 B/0 for fea4de996b388410b192fd1511bd28fa in 188ms, sequenceid=157, compaction requested=true 2024-11-20T19:26:45,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:45,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-20T19:26:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
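[Editor's note] The pid=107 flush recorded above goes through mob.DefaultMobStoreFlusher for family A: the cell data is first written to a MOB file under .../mobdir/.tmp and then renamed into .../mobdir/data/..., while the regular store file in .tmp/A only references it. That path is taken when the column family has MOB enabled and cells exceed its MOB threshold. The sketch below is a minimal, assumed illustration of declaring such a family; the table and family names come from the log, but the threshold value is an illustrative assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family "A" as a MOB family: cells larger than the threshold are written to
      // MOB files under .../mobdir/ at flush time, as seen in the log above.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(10L) // bytes; illustrative value only
          .build());
      admin.createTable(table.build());
    }
  }
}
```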
2024-11-20T19:26:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-20T19:26:45,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 341 msec 2024-11-20T19:26:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,661 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 346 msec 2024-11-20T19:26:45,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... repeated identical DEBUG entries: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, logged by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=35979) from 2024-11-20T19:26:45,747 to 2024-11-20T19:26:45,838 ...]
2024-11-20T19:26:45,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, repeats from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 35979 between 2024-11-20T19:26:45,888 and 2024-11-20T19:26:45,931; only the distinct entries from that window are kept below ...]
2024-11-20T19:26:45,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-20T19:26:45,925 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed
2024-11-20T19:26:45,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:26:45,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees
2024-11-20T19:26:45,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-20T19:26:45,928 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:26:45,929 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:26:45,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
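[Editor's note] The flush sequence recorded above (procId 106 completing, then a new FlushTableProcedure stored as pid=108 that fans out a FlushRegionProcedure subprocedure) is the server-side trace of a standard HBase Admin flush call. A minimal sketch of such a client call, assuming a reachable cluster and reusing the table name from the log; this is an illustration, not the test's own code:

// FlushSketch.java: trigger a synchronous flush of the table seen in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master stores a FlushTableProcedure for this request and schedules
      // FlushRegionProcedure subprocedures on the region servers, as in the trace above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}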
[... the storefiletracker.StoreFileTrackerFactory(122) DEBUG entry continues to repeat from handlers 0, 1 and 2 on port 35979 between 2024-11-20T19:26:45,932 and 2024-11-20T19:26:45,973 ...]
2024-11-20T19:26:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:45,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:45,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:45,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-20T19:26:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[the preceding StoreFileTrackerFactory(122) DEBUG entry repeats continuously from 2024-11-20T19:26:46,033 to 2024-11-20T19:26:46,081 across RpcServer.default.FPBQ.Fifo handlers 0-2, port 35979; duplicate entries omitted]
2024-11-20T19:26:46,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:46,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-11-20T19:26:46,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:46,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa:
2024-11-20T19:26:46,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:46,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109
2024-11-20T19:26:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=109
2024-11-20T19:26:46,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108
2024-11-20T19:26:46,085 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 155 msec
2024-11-20T19:26:46,087 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 159 msec
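Note: the INFO entries above show the master-side FlushTableProcedure (pid=108) completing after its per-region FlushRegionProcedure (pid=109) ran on the region server and reported back. A minimal sketch of how a client or test could request such a table flush through the public HBase Admin API follows; the table name mirrors this log, while the class name and configuration lookup are illustrative assumptions, not the code that produced these entries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Cluster settings are assumed to come from hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Admin#flush asks the master to flush all regions of the table; in the
            // log above this surfaces as a FlushTableProcedure dispatching a
            // FlushRegionProcedure to the region server hosting each region.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}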
[interleaved StoreFileTrackerFactory(122) DEBUG entries repeating continuously from 2024-11-20T19:26:46,082 through 2024-11-20T19:26:46,108 across RpcServer.default.FPBQ.Fifo handlers 0-2, port 35979; duplicate entries omitted]
2024-11-20T19:26:46,108 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,115 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,122 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,125 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,127 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,130 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,140 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,142 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,146 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,149 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,152 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,156 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,159 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,162 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,165 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,167 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,170 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,175 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,181 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,184 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,188 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,194 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,198 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,200 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,203 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,210 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-20T19:26:46,230 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed
2024-11-20T19:26:46,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:26:46,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees
2024-11-20T19:26:46,233 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:26:46,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-20T19:26:46,234 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:26:46,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T19:26:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-20T19:26:46,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A
2024-11-20T19:26:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:46,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B
2024-11-20T19:26:46,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:46,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C
2024-11-20T19:26:46,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:46,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa
2024-11-20T19:26:46,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-20T19:26:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:46,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d8dd23c32bc04ccab4840ad4ba92ed6b_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130806290/Put/seqid=0
2024-11-20T19:26:46,385 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:46,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111
2024-11-20T19:26:46,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:46,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing
2024-11-20T19:26:46,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:46,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:46,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:46,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:46,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742254_1430 (size=19774)
2024-11-20T19:26:46,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:46,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130866498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:46,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130866502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:46,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130866513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110
2024-11-20T19:26:46,540 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:46,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111
2024-11-20T19:26:46,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:46,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing
2024-11-20T19:26:46,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:46,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130866614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130866616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130866625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:46,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:46,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:46,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:46,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:46,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:46,804 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:46,808 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d8dd23c32bc04ccab4840ad4ba92ed6b_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d8dd23c32bc04ccab4840ad4ba92ed6b_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:46,810 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/9b4ba5ff8d89444f8c4e66602789601e, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:46,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/9b4ba5ff8d89444f8c4e66602789601e is 175, key is test_row_0/A:col10/1732130806290/Put/seqid=0 2024-11-20T19:26:46,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130866823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:46,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130866833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:46,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:46,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:46,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:46,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:46,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:46,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130866840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742255_1431 (size=57033) 2024-11-20T19:26:46,998 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:46,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:46,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:46,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:46,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:46,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:46,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130867135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130867145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:47,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:47,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:47,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:47,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:47,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,157 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130867152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,255 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/9b4ba5ff8d89444f8c4e66602789601e 2024-11-20T19:26:47,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/9a43ae8d8490409ea5fb7a67e0324809 is 50, key is test_row_0/B:col10/1732130806290/Put/seqid=0 2024-11-20T19:26:47,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742256_1432 (size=12151) 2024-11-20T19:26:47,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/9a43ae8d8490409ea5fb7a67e0324809 2024-11-20T19:26:47,308 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:47,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:47,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:47,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:47,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:47,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/ae764e832f444ff38e2afcb89624404e is 50, key is test_row_0/C:col10/1732130806290/Put/seqid=0 2024-11-20T19:26:47,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:47,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742257_1433 (size=12151) 2024-11-20T19:26:47,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/ae764e832f444ff38e2afcb89624404e 2024-11-20T19:26:47,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/9b4ba5ff8d89444f8c4e66602789601e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e 2024-11-20T19:26:47,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e, entries=300, sequenceid=169, filesize=55.7 K 2024-11-20T19:26:47,378 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/9a43ae8d8490409ea5fb7a67e0324809 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9a43ae8d8490409ea5fb7a67e0324809 2024-11-20T19:26:47,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9a43ae8d8490409ea5fb7a67e0324809, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T19:26:47,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/ae764e832f444ff38e2afcb89624404e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ae764e832f444ff38e2afcb89624404e 2024-11-20T19:26:47,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ae764e832f444ff38e2afcb89624404e, entries=150, sequenceid=169, filesize=11.9 K 2024-11-20T19:26:47,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for fea4de996b388410b192fd1511bd28fa in 1065ms, sequenceid=169, compaction requested=true 2024-11-20T19:26:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:47,394 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:26:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:47,394 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:26:47,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:47,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:47,395 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 198393 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:26:47,395 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:47,395 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:47,395 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d15ce8b2fb7f4951a1bd0b7dd356786c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=193.7 K 2024-11-20T19:26:47,395 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:47,395 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d15ce8b2fb7f4951a1bd0b7dd356786c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e] 2024-11-20T19:26:47,396 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d15ce8b2fb7f4951a1bd0b7dd356786c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130800431 2024-11-20T19:26:47,396 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60511 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:26:47,396 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:47,396 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:47,396 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8b111285a92b40be84016a7bf68824b2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/693b25271063405fa697fd4c3bbd1242, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0b6622d32c8940c2a0b357cca45f1794, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/d621885a67f745e6ad5ab668ced4a843, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9a43ae8d8490409ea5fb7a67e0324809] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=59.1 K 2024-11-20T19:26:47,396 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf365753500a44e0894f7a423b730d66, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130802591 2024-11-20T19:26:47,398 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b111285a92b40be84016a7bf68824b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130800431 2024-11-20T19:26:47,398 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7b44f8407c34845ad23b46dc8e81807, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732130803245 2024-11-20T19:26:47,398 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 693b25271063405fa697fd4c3bbd1242, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130802617 2024-11-20T19:26:47,398 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9873e94cfc142fb99b30cb6d897c4d6, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732130803945 2024-11-20T19:26:47,399 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b6622d32c8940c2a0b357cca45f1794, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732130803245 2024-11-20T19:26:47,399 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b4ba5ff8d89444f8c4e66602789601e, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130806208 2024-11-20T19:26:47,400 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d621885a67f745e6ad5ab668ced4a843, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732130803945 2024-11-20T19:26:47,400 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a43ae8d8490409ea5fb7a67e0324809, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130806290 2024-11-20T19:26:47,439 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#366 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:47,439 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/82b65e4e49624deeb0c85c42bf50f769 is 50, key is test_row_0/B:col10/1732130806290/Put/seqid=0 2024-11-20T19:26:47,448 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:47,460 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-20T19:26:47,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:47,461 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:26:47,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:47,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:47,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:47,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:47,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:47,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:47,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
as already flushing 2024-11-20T19:26:47,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:47,476 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202825a538149746b5b0fa6e7bff2c9103_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:47,480 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202825a538149746b5b0fa6e7bff2c9103_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:47,480 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202825a538149746b5b0fa6e7bff2c9103_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:47,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742258_1434 (size=12527) 2024-11-20T19:26:47,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112099f74c0e72c1443fba1d4d93ebb2b994_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130806501/Put/seqid=0 2024-11-20T19:26:47,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130867532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130867532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742259_1435 (size=4469) 2024-11-20T19:26:47,557 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#367 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:47,557 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/acc179ed329541c18707595c00310494 is 175, key is test_row_0/A:col10/1732130806290/Put/seqid=0 2024-11-20T19:26:47,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742260_1436 (size=12304) 2024-11-20T19:26:47,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:47,596 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112099f74c0e72c1443fba1d4d93ebb2b994_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112099f74c0e72c1443fba1d4d93ebb2b994_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:47,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/30d602d8eb48483f9d0ff0e8143f6e29, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:47,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/30d602d8eb48483f9d0ff0e8143f6e29 is 175, key is test_row_0/A:col10/1732130806501/Put/seqid=0 2024-11-20T19:26:47,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742261_1437 (size=31481) 2024-11-20T19:26:47,613 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/acc179ed329541c18707595c00310494 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/acc179ed329541c18707595c00310494 2024-11-20T19:26:47,621 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into acc179ed329541c18707595c00310494(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:47,621 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:47,621 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=11, startTime=1732130807394; duration=0sec 2024-11-20T19:26:47,621 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:47,621 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:47,621 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-20T19:26:47,623 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60511 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-20T19:26:47,624 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:47,624 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:47,624 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/e6a4bb766b0e4debb7353c15a15c9286, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/cc6d12b5528a4be2a87fba1282558282, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d75ce22f6cb54b2589ca7038d312bb8e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/84e0f7404b7a40dcacd05bf5949fc9a5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ae764e832f444ff38e2afcb89624404e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=59.1 K 2024-11-20T19:26:47,624 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6a4bb766b0e4debb7353c15a15c9286, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130800431 2024-11-20T19:26:47,624 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc6d12b5528a4be2a87fba1282558282, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732130802617 
2024-11-20T19:26:47,625 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d75ce22f6cb54b2589ca7038d312bb8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732130803245 2024-11-20T19:26:47,625 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84e0f7404b7a40dcacd05bf5949fc9a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732130803945 2024-11-20T19:26:47,626 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae764e832f444ff38e2afcb89624404e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130806290 2024-11-20T19:26:47,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742262_1438 (size=31105) 2024-11-20T19:26:47,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130867644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130867647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130867648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,658 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#369 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:47,658 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/8c20c3b0a8a64109ad6f25e6a94e2580 is 50, key is test_row_0/C:col10/1732130806290/Put/seqid=0 2024-11-20T19:26:47,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130867659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130867660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742263_1439 (size=12527) 2024-11-20T19:26:47,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130867853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:47,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130867857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:47,895 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/82b65e4e49624deeb0c85c42bf50f769 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/82b65e4e49624deeb0c85c42bf50f769 2024-11-20T19:26:47,900 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into 82b65e4e49624deeb0c85c42bf50f769(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:47,900 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:47,900 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=11, startTime=1732130807394; duration=0sec 2024-11-20T19:26:47,900 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:47,900 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:48,040 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/30d602d8eb48483f9d0ff0e8143f6e29 2024-11-20T19:26:48,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/69cfaa2822b14966b03ff6d80f810c0d is 50, key is test_row_0/B:col10/1732130806501/Put/seqid=0 2024-11-20T19:26:48,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742264_1440 (size=12151) 2024-11-20T19:26:48,076 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/69cfaa2822b14966b03ff6d80f810c0d 2024-11-20T19:26:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/2d9abc75c9a6498c81390cd03515df65 is 50, key is test_row_0/C:col10/1732130806501/Put/seqid=0 2024-11-20T19:26:48,108 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/8c20c3b0a8a64109ad6f25e6a94e2580 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/8c20c3b0a8a64109ad6f25e6a94e2580 2024-11-20T19:26:48,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742265_1441 (size=12151) 2024-11-20T19:26:48,113 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 5 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into 8c20c3b0a8a64109ad6f25e6a94e2580(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:48,113 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:48,113 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=11, startTime=1732130807394; duration=0sec 2024-11-20T19:26:48,113 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:48,113 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:48,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130868164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130868168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:48,510 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/2d9abc75c9a6498c81390cd03515df65 2024-11-20T19:26:48,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/30d602d8eb48483f9d0ff0e8143f6e29 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29 2024-11-20T19:26:48,530 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29, entries=150, sequenceid=194, filesize=30.4 K 2024-11-20T19:26:48,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/69cfaa2822b14966b03ff6d80f810c0d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/69cfaa2822b14966b03ff6d80f810c0d 
2024-11-20T19:26:48,535 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/69cfaa2822b14966b03ff6d80f810c0d, entries=150, sequenceid=194, filesize=11.9 K 2024-11-20T19:26:48,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/2d9abc75c9a6498c81390cd03515df65 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2d9abc75c9a6498c81390cd03515df65 2024-11-20T19:26:48,543 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2d9abc75c9a6498c81390cd03515df65, entries=150, sequenceid=194, filesize=11.9 K 2024-11-20T19:26:48,544 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for fea4de996b388410b192fd1511bd28fa in 1083ms, sequenceid=194, compaction requested=false 2024-11-20T19:26:48,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:48,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:48,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-20T19:26:48,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-20T19:26:48,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-20T19:26:48,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3110 sec 2024-11-20T19:26:48,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.3150 sec 2024-11-20T19:26:48,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:48,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:48,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:48,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:48,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b24b9743525240cc8199b8af67496350_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:48,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742266_1442 (size=14794) 2024-11-20T19:26:48,713 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:48,716 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b24b9743525240cc8199b8af67496350_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b24b9743525240cc8199b8af67496350_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:48,718 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/6f9ec4134ac847e0ad68e5b440408355, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:48,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/6f9ec4134ac847e0ad68e5b440408355 is 175, key is test_row_0/A:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:48,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130868724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130868727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130868728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130868730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742267_1443 (size=39749) 2024-11-20T19:26:48,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130868739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,749 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/6f9ec4134ac847e0ad68e5b440408355 2024-11-20T19:26:48,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/ca8cf4f4591d467ca9c3fc0788829728 is 50, key is test_row_0/B:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:48,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742268_1444 (size=12151) 2024-11-20T19:26:48,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/ca8cf4f4591d467ca9c3fc0788829728 2024-11-20T19:26:48,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c546d0d71d3c4bda9b99299c4ff5f0eb is 50, key is test_row_0/C:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:48,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742269_1445 (size=12151) 2024-11-20T19:26:48,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c546d0d71d3c4bda9b99299c4ff5f0eb 2024-11-20T19:26:48,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/6f9ec4134ac847e0ad68e5b440408355 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355 2024-11-20T19:26:48,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130868841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130868844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130868844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130868844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,852 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355, entries=200, sequenceid=210, filesize=38.8 K 2024-11-20T19:26:48,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/ca8cf4f4591d467ca9c3fc0788829728 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ca8cf4f4591d467ca9c3fc0788829728 2024-11-20T19:26:48,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:48,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130868850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:48,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ca8cf4f4591d467ca9c3fc0788829728, entries=150, sequenceid=210, filesize=11.9 K 2024-11-20T19:26:48,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c546d0d71d3c4bda9b99299c4ff5f0eb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c546d0d71d3c4bda9b99299c4ff5f0eb 2024-11-20T19:26:48,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c546d0d71d3c4bda9b99299c4ff5f0eb, entries=150, sequenceid=210, filesize=11.9 K 2024-11-20T19:26:48,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fea4de996b388410b192fd1511bd28fa in 201ms, sequenceid=210, compaction requested=true 2024-11-20T19:26:48,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:48,867 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:48,868 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102335 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:48,868 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:48,868 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:48,868 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/acc179ed329541c18707595c00310494, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=99.9 K 2024-11-20T19:26:48,868 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:48,868 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/acc179ed329541c18707595c00310494, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355] 2024-11-20T19:26:48,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:48,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:48,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:48,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:48,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:48,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:48,871 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:48,871 DEBUG 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting acc179ed329541c18707595c00310494, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130806290 2024-11-20T19:26:48,871 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30d602d8eb48483f9d0ff0e8143f6e29, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732130806495 2024-11-20T19:26:48,872 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f9ec4134ac847e0ad68e5b440408355, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732130807514 2024-11-20T19:26:48,872 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:48,872 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:48,872 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:48,872 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/82b65e4e49624deeb0c85c42bf50f769, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/69cfaa2822b14966b03ff6d80f810c0d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ca8cf4f4591d467ca9c3fc0788829728] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=36.0 K 2024-11-20T19:26:48,873 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 82b65e4e49624deeb0c85c42bf50f769, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130806290 2024-11-20T19:26:48,873 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 69cfaa2822b14966b03ff6d80f810c0d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732130806495 2024-11-20T19:26:48,874 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ca8cf4f4591d467ca9c3fc0788829728, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732130807514 2024-11-20T19:26:48,894 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:48,898 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/a588755a63004b5e8d3ab486d3118c5c is 50, key is test_row_0/B:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:48,903 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:48,915 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120fc4d8f8bcb31424a8075f2b7f99cff9e_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:48,917 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120fc4d8f8bcb31424a8075f2b7f99cff9e_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:48,917 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fc4d8f8bcb31424a8075f2b7f99cff9e_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:48,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742270_1446 (size=12629) 2024-11-20T19:26:48,936 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/a588755a63004b5e8d3ab486d3118c5c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/a588755a63004b5e8d3ab486d3118c5c 2024-11-20T19:26:48,947 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into a588755a63004b5e8d3ab486d3118c5c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:48,947 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:48,947 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=13, startTime=1732130808870; duration=0sec 2024-11-20T19:26:48,948 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:48,948 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:48,948 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:48,950 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:48,950 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:48,950 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:48,950 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/8c20c3b0a8a64109ad6f25e6a94e2580, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2d9abc75c9a6498c81390cd03515df65, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c546d0d71d3c4bda9b99299c4ff5f0eb] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=36.0 K 2024-11-20T19:26:48,951 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c20c3b0a8a64109ad6f25e6a94e2580, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732130806290 2024-11-20T19:26:48,951 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d9abc75c9a6498c81390cd03515df65, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732130806495 2024-11-20T19:26:48,951 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c546d0d71d3c4bda9b99299c4ff5f0eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732130807514 2024-11-20T19:26:48,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 
is added to blk_1073742271_1447 (size=4469) 2024-11-20T19:26:48,985 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#376 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:48,986 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f978ebb7748143d0ac152ccf6b6abe70 is 175, key is test_row_0/A:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:48,987 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#377 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:48,987 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/70903dd3ed8f40619b66d50442c51e5d is 50, key is test_row_0/C:col10/1732130807530/Put/seqid=0 2024-11-20T19:26:49,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742272_1448 (size=31583) 2024-11-20T19:26:49,042 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/f978ebb7748143d0ac152ccf6b6abe70 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f978ebb7748143d0ac152ccf6b6abe70 2024-11-20T19:26:49,049 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into f978ebb7748143d0ac152ccf6b6abe70(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:49,049 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:49,049 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=13, startTime=1732130808867; duration=0sec 2024-11-20T19:26:49,050 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:49,050 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:49,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742273_1449 (size=12629) 2024-11-20T19:26:49,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:49,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:26:49,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:49,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:49,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:49,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:49,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:49,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:49,062 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/70903dd3ed8f40619b66d50442c51e5d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/70903dd3ed8f40619b66d50442c51e5d 2024-11-20T19:26:49,070 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into 70903dd3ed8f40619b66d50442c51e5d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:49,070 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:49,070 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=13, startTime=1732130808871; duration=0sec 2024-11-20T19:26:49,070 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:49,070 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:49,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130869072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130869073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130869076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130869077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130869078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202e47c26a72ae41dda6031c037945fcb7_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130809057/Put/seqid=0 2024-11-20T19:26:49,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742274_1450 (size=14794) 2024-11-20T19:26:49,122 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:49,125 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202e47c26a72ae41dda6031c037945fcb7_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202e47c26a72ae41dda6031c037945fcb7_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:49,125 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d0cd60554da74c03a96f54615515ba52, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:49,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d0cd60554da74c03a96f54615515ba52 is 175, key is test_row_0/A:col10/1732130809057/Put/seqid=0 2024-11-20T19:26:49,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742275_1451 (size=39749) 2024-11-20T19:26:49,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130869185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130869185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130869185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130869187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130869188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130869389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130869391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130869392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130869395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130869397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,545 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d0cd60554da74c03a96f54615515ba52 2024-11-20T19:26:49,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/104fd00d6cf6464c9b7c48781a0482e6 is 50, key is test_row_0/B:col10/1732130809057/Put/seqid=0 2024-11-20T19:26:49,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742276_1452 (size=12151) 2024-11-20T19:26:49,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130869693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130869697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130869699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130869701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:49,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130869706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:49,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/104fd00d6cf6464c9b7c48781a0482e6 2024-11-20T19:26:50,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c07b16db976e4e9893196e2582f3bcef is 50, key is test_row_0/C:col10/1732130809057/Put/seqid=0 2024-11-20T19:26:50,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742277_1453 (size=12151) 2024-11-20T19:26:50,031 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c07b16db976e4e9893196e2582f3bcef 2024-11-20T19:26:50,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d0cd60554da74c03a96f54615515ba52 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52 2024-11-20T19:26:50,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52, entries=200, sequenceid=236, filesize=38.8 K 2024-11-20T19:26:50,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/104fd00d6cf6464c9b7c48781a0482e6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/104fd00d6cf6464c9b7c48781a0482e6 2024-11-20T19:26:50,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/104fd00d6cf6464c9b7c48781a0482e6, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T19:26:50,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c07b16db976e4e9893196e2582f3bcef as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c07b16db976e4e9893196e2582f3bcef 2024-11-20T19:26:50,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c07b16db976e4e9893196e2582f3bcef, entries=150, sequenceid=236, filesize=11.9 K 2024-11-20T19:26:50,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fea4de996b388410b192fd1511bd28fa in 999ms, sequenceid=236, compaction requested=false 2024-11-20T19:26:50,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:50,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:50,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:50,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:50,216 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206378675cfc9a425597ecaa01dd6f2a59_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:50,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130870242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130870242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130870243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742278_1454 (size=14794) 2024-11-20T19:26:50,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130870253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130870254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-20T19:26:50,339 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-20T19:26:50,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:50,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-20T19:26:50,343 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:50,345 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:50,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:50,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130870354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130870355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130870355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130870364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130870365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:50,497 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,498 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:50,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:50,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
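The RegionTooBusyException entries above are the server-side backpressure path: once the region's memstore passes its blocking limit (512.0 K here, far below the production default and normally derived from hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects further Mutate calls until a flush drains the memstore. The following is only a minimal sketch of that kind of guard, assuming a single per-region size counter and substituting a plain IOException for the real RegionTooBusyException; it is not the actual HRegion code.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Simplified illustration of a memstore blocking check.
final class MemStoreGuard {
    private final long blockingMemStoreSize;               // 512 * 1024 in this test run
    private final AtomicLong memStoreDataSize = new AtomicLong();

    MemStoreGuard(long blockingMemStoreSize) {
        this.blockingMemStoreSize = blockingMemStoreSize;
    }

    /** Called before a mutation is applied; mirrors the WARN + exception pattern in the log. */
    void checkResources(String regionName) throws IOException {
        if (memStoreDataSize.get() > blockingMemStoreSize) {
            // The real server also asks the flusher to flush this region before throwing
            // RegionTooBusyException; a plain IOException keeps the sketch dependency-free.
            throw new IOException("Over memstore limit=" + blockingMemStoreSize
                + ", regionName=" + regionName);
        }
    }

    void add(long cellSize)     { memStoreDataSize.addAndGet(cellSize); }
    void flushed(long drained)  { memStoreDataSize.addAndGet(-drained); }
}
```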
2024-11-20T19:26:50,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130870563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130870570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130870570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130870572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130870574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:50,651 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:50,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:50,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
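The pid=113 entries show the other half of the stall: the master's flush procedure keeps re-dispatching a FlushRegionCallable while the region is already mid-flush, so each attempt logs "NOT flushing ... as already flushing", fails with "Unable to complete flush", and the master records "Remote procedure failed" and retries until the in-progress flush completes. A rough sketch of that shape follows; Region and requestFlush() are hypothetical stand-ins for the real region-server API, not the actual FlushRegionCallable implementation.

```java
import java.io.IOException;
import java.util.concurrent.Callable;

// Sketch of a flush callable that fails (and is retried by its dispatcher)
// when the region is already flushing.
final class FlushRegionSketch implements Callable<Void> {
    interface Region {
        /** Returns false when a flush is already running ("NOT flushing ... as already flushing"). */
        boolean requestFlush() throws IOException;
        String name();
    }

    private final Region region;

    FlushRegionSketch(Region region) { this.region = region; }

    @Override
    public Void call() throws IOException {
        if (!region.requestFlush()) {
            // Reported back to the dispatcher, which logs the failure and re-dispatches.
            throw new IOException("Unable to complete flush " + region.name());
        }
        return null;
    }
}
```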
2024-11-20T19:26:50,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,664 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:50,668 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411206378675cfc9a425597ecaa01dd6f2a59_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206378675cfc9a425597ecaa01dd6f2a59_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:50,669 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/19442f20e8384f91909ead524fe39f39, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:50,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/19442f20e8384f91909ead524fe39f39 is 175, key is test_row_0/A:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:50,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742279_1455 (size=39749) 2024-11-20T19:26:50,681 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/19442f20e8384f91909ead524fe39f39 2024-11-20T19:26:50,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/fb3373cd662c49f59a4b71dc1a9357fb is 50, key is test_row_0/B:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:50,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742280_1456 (size=12151) 2024-11-20T19:26:50,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:50,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:50,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:50,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:50,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130870866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130870875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130870877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130870877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:50,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130870878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:50,957 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:50,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:50,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:50,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:50,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:50,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:51,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:51,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:51,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:51,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:51,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/fb3373cd662c49f59a4b71dc1a9357fb 2024-11-20T19:26:51,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c131f3dc88a54df5bdd3858c768522e7 is 50, key is test_row_0/C:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:51,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742281_1457 (size=12151) 2024-11-20T19:26:51,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c131f3dc88a54df5bdd3858c768522e7 2024-11-20T19:26:51,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/19442f20e8384f91909ead524fe39f39 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39 2024-11-20T19:26:51,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39, entries=200, sequenceid=250, filesize=38.8 K 2024-11-20T19:26:51,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/fb3373cd662c49f59a4b71dc1a9357fb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/fb3373cd662c49f59a4b71dc1a9357fb 2024-11-20T19:26:51,215 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/fb3373cd662c49f59a4b71dc1a9357fb, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T19:26:51,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/c131f3dc88a54df5bdd3858c768522e7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c131f3dc88a54df5bdd3858c768522e7 2024-11-20T19:26:51,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c131f3dc88a54df5bdd3858c768522e7, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T19:26:51,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fea4de996b388410b192fd1511bd28fa in 1022ms, sequenceid=250, compaction requested=true 2024-11-20T19:26:51,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:51,222 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:51,222 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111081 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:51,222 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:51,223 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:51,223 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f978ebb7748143d0ac152ccf6b6abe70, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=108.5 K 2024-11-20T19:26:51,223 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:51,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f978ebb7748143d0ac152ccf6b6abe70, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39] 2024-11-20T19:26:51,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f978ebb7748143d0ac152ccf6b6abe70, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732130807514 2024-11-20T19:26:51,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0cd60554da74c03a96f54615515ba52, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732130808708 2024-11-20T19:26:51,224 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19442f20e8384f91909ead524fe39f39, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130809073 2024-11-20T19:26:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:51,228 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:51,229 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:51,229 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:51,229 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:51,229 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/a588755a63004b5e8d3ab486d3118c5c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/104fd00d6cf6464c9b7c48781a0482e6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/fb3373cd662c49f59a4b71dc1a9357fb] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=36.1 K 2024-11-20T19:26:51,229 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a588755a63004b5e8d3ab486d3118c5c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732130807514 2024-11-20T19:26:51,230 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 104fd00d6cf6464c9b7c48781a0482e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732130808708 2024-11-20T19:26:51,230 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting fb3373cd662c49f59a4b71dc1a9357fb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130809073 2024-11-20T19:26:51,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:51,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:51,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:51,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:51,243 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:51,253 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
fea4de996b388410b192fd1511bd28fa#B#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:51,253 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1431b7e07a1540598d8bc7e5f5d59aa2 is 50, key is test_row_0/B:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:51,261 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411202919508d71664b9487232e91a7279e4a_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:51,261 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-20T19:26:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:51,262 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:26:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:51,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:51,263 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411202919508d71664b9487232e91a7279e4a_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:51,263 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer 
for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202919508d71664b9487232e91a7279e4a_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:51,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742282_1458 (size=12731) 2024-11-20T19:26:51,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205190add2ff444f14b32fa9798a620726_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130810251/Put/seqid=0 2024-11-20T19:26:51,354 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1431b7e07a1540598d8bc7e5f5d59aa2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1431b7e07a1540598d8bc7e5f5d59aa2 2024-11-20T19:26:51,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742283_1459 (size=4469) 2024-11-20T19:26:51,358 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#384 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:51,359 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/cdc88d0d8bf24ae38b992c9c7fb07289 is 175, key is test_row_0/A:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:51,361 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into 1431b7e07a1540598d8bc7e5f5d59aa2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:51,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:51,361 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=13, startTime=1732130811228; duration=0sec 2024-11-20T19:26:51,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:51,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:51,361 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:51,363 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:51,363 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:51,363 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:51,363 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/70903dd3ed8f40619b66d50442c51e5d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c07b16db976e4e9893196e2582f3bcef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c131f3dc88a54df5bdd3858c768522e7] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=36.1 K 2024-11-20T19:26:51,363 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 70903dd3ed8f40619b66d50442c51e5d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732130807514 2024-11-20T19:26:51,364 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c07b16db976e4e9893196e2582f3bcef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1732130808708 2024-11-20T19:26:51,364 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c131f3dc88a54df5bdd3858c768522e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130809073 2024-11-20T19:26:51,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:51,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742284_1460 (size=12454) 2024-11-20T19:26:51,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:51,405 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205190add2ff444f14b32fa9798a620726_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205190add2ff444f14b32fa9798a620726_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:51,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/7724c1220eb94b8fad3677045859bb6e, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:51,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/7724c1220eb94b8fad3677045859bb6e is 175, key is test_row_0/A:col10/1732130810251/Put/seqid=0 2024-11-20T19:26:51,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742285_1461 (size=31685) 2024-11-20T19:26:51,410 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#387 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:51,411 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/bacafbc4fa8544a9a6cf4c199183c7bf is 50, key is test_row_0/C:col10/1732130810198/Put/seqid=0 2024-11-20T19:26:51,414 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/cdc88d0d8bf24ae38b992c9c7fb07289 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cdc88d0d8bf24ae38b992c9c7fb07289 2024-11-20T19:26:51,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130871409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,418 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into cdc88d0d8bf24ae38b992c9c7fb07289(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:51,418 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:51,418 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=13, startTime=1732130811221; duration=0sec 2024-11-20T19:26:51,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:51,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:51,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130871413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130871414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130871415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130871416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-20T19:26:51,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742286_1462 (size=31255) 2024-11-20T19:26:51,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742287_1463 (size=12731) 2024-11-20T19:26:51,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130871518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130871531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130871531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130871532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130871532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130871726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130871738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130871740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130871742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:51,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130871743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:51,860 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/7724c1220eb94b8fad3677045859bb6e
2024-11-20T19:26:51,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/7dc631225b6e4181a1ab617741797425 is 50, key is test_row_0/B:col10/1732130810251/Put/seqid=0
2024-11-20T19:26:51,888 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/bacafbc4fa8544a9a6cf4c199183c7bf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/bacafbc4fa8544a9a6cf4c199183c7bf
2024-11-20T19:26:51,893 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into bacafbc4fa8544a9a6cf4c199183c7bf(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-20T19:26:51,893 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa:
2024-11-20T19:26:51,893 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=13, startTime=1732130811235; duration=0sec
2024-11-20T19:26:51,893 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T19:26:51,893 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C
2024-11-20T19:26:51,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742288_1464 (size=12301)
2024-11-20T19:26:52,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:52,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130872032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:52,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:52,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130872046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130872050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:52,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:52,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130872052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:52,056 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:26:52,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130872053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:52,305 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/7dc631225b6e4181a1ab617741797425
2024-11-20T19:26:52,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/d67b144e37bf453c9fb753ad919d6103 is 50, key is test_row_0/C:col10/1732130810251/Put/seqid=0
2024-11-20T19:26:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742289_1465 (size=12301)
2024-11-20T19:26:52,345 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/d67b144e37bf453c9fb753ad919d6103
2024-11-20T19:26:52,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/7724c1220eb94b8fad3677045859bb6e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e
2024-11-20T19:26:52,367 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e, entries=150, sequenceid=273, filesize=30.5 K
2024-11-20T19:26:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/7dc631225b6e4181a1ab617741797425 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7dc631225b6e4181a1ab617741797425 2024-11-20T19:26:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,373 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7dc631225b6e4181a1ab617741797425, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T19:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/d67b144e37bf453c9fb753ad919d6103 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d67b144e37bf453c9fb753ad919d6103 2024-11-20T19:26:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,384 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d67b144e37bf453c9fb753ad919d6103, entries=150, sequenceid=273, filesize=12.0 K 2024-11-20T19:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,385 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for fea4de996b388410b192fd1511bd28fa in 1123ms, sequenceid=273, compaction requested=false 2024-11-20T19:26:52,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:52,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-20T19:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-20T19:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-20T19:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0410 sec 2024-11-20T19:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,390 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.0470 sec 2024-11-20T19:26:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:52,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112
2024-11-20T19:26:52,459 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed
2024-11-20T19:26:52,461 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-20T19:26:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees
2024-11-20T19:26:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-20T19:26:52,463 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-20T19:26:52,463 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-20T19:26:52,463 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-20T19:26:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:26:52,532 to 19:26:52,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical DEBUG entry logged repeatedly by handlers 0-2; duplicates collapsed)
2024-11-20T19:26:52,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-20T19:26:52,563 to 19:26:52,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical DEBUG entry repeated; duplicates collapsed)
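The block of DEBUG entries above is StoreFileTrackerFactory resolving a tracker for each store touched by these RPC handlers; nothing in this test overrides the setting, so the DEFAULT implementation (DefaultStoreFileTracker) is instantiated every time. The sketch below is illustration only: the "hbase.store.file-tracker.impl" key and the DEFAULT/FILE values are assumptions about how StoreFileTrackerFactory is configured in recent HBase releases, not something stated in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Cluster-wide default (equivalent to hbase-site.xml). "DEFAULT" matches the
    // DefaultStoreFileTracker instantiated throughout the log above.
    // Assumption: this is the key read by StoreFileTrackerFactory.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");

    // Per-table override on the table descriptor, e.g. the file-based tracker.
    // Table and family names reuse the ones visible in this test log.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .build();
    System.out.println(td);
  }
}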
2024-11-20T19:26:52,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa
2024-11-20T19:26:52,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB
2024-11-20T19:26:52,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A
2024-11-20T19:26:52,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:52,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B
2024-11-20T19:26:52,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:52,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C
2024-11-20T19:26:52,581 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-20T19:26:52,580 to 19:26:52,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (interleaved duplicate entries collapsed)
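The flush above is initiated by the MemStoreFlusher once the region's memstore crosses its flush threshold; each CompactingMemStore (stores A, B and C) swaps its active segment into the pipeline before the data is written out. The same flush can also be requested from a client. A minimal sketch, assuming connection settings come from the local HBase configuration and reusing the table name from this test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to flush the table's memstores to HFiles,
      // the same operation the MemStoreFlusher is performing above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}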
2024-11-20T19:26:52,582 to 19:26:52,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical DEBUG entry repeated; duplicates collapsed)
2024-11-20T19:26:52,616 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276
2024-11-20T19:26:52,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-20T19:26:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing
2024-11-20T19:26:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.
2024-11-20T19:26:52,617 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:52,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115
java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
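The IOException above is the region server declining the duplicate flush request carried by pid=115 because the region is already flushing; together with the RegionTooBusyException warnings that follow, it is transient back-pressure rather than data loss. The sketch below is illustration only: the HBase client normally retries these conditions internally, and the explicit backoff loop, row key and values are assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row and column names mirror the test data seen in this log; the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore limit (as in the WARN entries below);
          // back off so the flusher can drain the memstore, then retry.
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}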
2024-11-20T19:26:52,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=115
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-20T19:26:52,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ca37001fbeac4aacbecc91f985ada9db_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130811413/Put/seqid=0
2024-11-20T19:26:52,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130872643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130872643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130872649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742290_1466 (size=12454) 2024-11-20T19:26:52,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130872650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130872654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,665 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:52,668 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120ca37001fbeac4aacbecc91f985ada9db_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ca37001fbeac4aacbecc91f985ada9db_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:52,670 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/b7e5a606e31340ffbe9563597fc2b429, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:52,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/b7e5a606e31340ffbe9563597fc2b429 is 175, key is test_row_0/A:col10/1732130811413/Put/seqid=0 2024-11-20T19:26:52,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742291_1467 (size=31251) 2024-11-20T19:26:52,708 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/b7e5a606e31340ffbe9563597fc2b429 2024-11-20T19:26:52,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/ed5a1144df3c4032bb27381d2b2a2e97 is 50, key is test_row_0/B:col10/1732130811413/Put/seqid=0 2024-11-20T19:26:52,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130872757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130872757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130872757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:52,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742292_1468 (size=9857) 2024-11-20T19:26:52,773 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:26:52,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130872762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:52,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:52,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:52,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:52,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130872765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:52,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/ed5a1144df3c4032bb27381d2b2a2e97 2024-11-20T19:26:52,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/0dbe77dc404e4a1ba62718e28f17bc7c is 50, key is test_row_0/C:col10/1732130811413/Put/seqid=0 2024-11-20T19:26:52,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742293_1469 (size=9857) 2024-11-20T19:26:52,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/0dbe77dc404e4a1ba62718e28f17bc7c 2024-11-20T19:26:52,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/b7e5a606e31340ffbe9563597fc2b429 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429 2024-11-20T19:26:52,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429, entries=150, sequenceid=291, filesize=30.5 K 2024-11-20T19:26:52,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/ed5a1144df3c4032bb27381d2b2a2e97 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ed5a1144df3c4032bb27381d2b2a2e97 2024-11-20T19:26:52,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ed5a1144df3c4032bb27381d2b2a2e97, entries=100, sequenceid=291, filesize=9.6 K 2024-11-20T19:26:52,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/0dbe77dc404e4a1ba62718e28f17bc7c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0dbe77dc404e4a1ba62718e28f17bc7c 2024-11-20T19:26:52,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0dbe77dc404e4a1ba62718e28f17bc7c, entries=100, sequenceid=291, filesize=9.6 K 2024-11-20T19:26:52,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for fea4de996b388410b192fd1511bd28fa in 276ms, sequenceid=291, compaction requested=true 2024-11-20T19:26:52,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:52,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:52,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:52,856 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:52,856 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:52,857 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94191 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:52,857 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:52,858 INFO 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:52,858 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cdc88d0d8bf24ae38b992c9c7fb07289, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=92.0 K 2024-11-20T19:26:52,858 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:52,858 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cdc88d0d8bf24ae38b992c9c7fb07289, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429] 2024-11-20T19:26:52,858 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:52,858 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:52,858 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:52,858 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1431b7e07a1540598d8bc7e5f5d59aa2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7dc631225b6e4181a1ab617741797425, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ed5a1144df3c4032bb27381d2b2a2e97] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=34.1 K 2024-11-20T19:26:52,859 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1431b7e07a1540598d8bc7e5f5d59aa2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130809073 2024-11-20T19:26:52,859 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cdc88d0d8bf24ae38b992c9c7fb07289, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130809073 2024-11-20T19:26:52,859 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dc631225b6e4181a1ab617741797425, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732130810239 2024-11-20T19:26:52,859 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7724c1220eb94b8fad3677045859bb6e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732130810239 2024-11-20T19:26:52,859 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7e5a606e31340ffbe9563597fc2b429, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130811413 2024-11-20T19:26:52,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:52,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:52,859 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ed5a1144df3c4032bb27381d2b2a2e97, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130811413 2024-11-20T19:26:52,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:52,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:52,867 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true 
store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:52,872 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#394 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:52,875 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/b12dde4018fe4eeba88885a165107456 is 50, key is test_row_0/B:col10/1732130811413/Put/seqid=0 2024-11-20T19:26:52,883 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411208a8af0c8eae9437597c62e3e41ab92b4_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:52,885 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411208a8af0c8eae9437597c62e3e41ab92b4_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:52,885 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411208a8af0c8eae9437597c62e3e41ab92b4_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:52,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742294_1470 (size=12983) 2024-11-20T19:26:52,916 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/b12dde4018fe4eeba88885a165107456 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/b12dde4018fe4eeba88885a165107456 2024-11-20T19:26:52,924 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into b12dde4018fe4eeba88885a165107456(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
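The compaction bookkeeping above is routine: once the flush adds a third store file, CompactSplit marks the A, B and C stores for minor compaction, ExploringCompactionPolicy selects all 3 eligible files (well under the 16 blocking-store-files threshold the log reports), and the B-store compaction completes almost immediately. The MOB writer that is created and then aborted is also normal: the A family is evidently MOB-enabled, but this compaction produced no cells large enough to spill into a MOB file. The knobs behind the file selection are ordinary Configuration keys; the sketch below lists the commonly cited ones with their stock defaults, purely for reference and not the values this test actually runs with.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      // Illustrative defaults only; the test's hbase-site.xml may differ.
      public static Configuration exampleConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compactionThreshold", 3);    // files needed before a minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);        // most files merged in a single compaction
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // size ratio used by ExploringCompactionPolicy
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);    // matches the "16 blocking" figure in the log
        return conf;
      }
    }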
2024-11-20T19:26:52,924 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:52,924 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=13, startTime=1732130812856; duration=0sec 2024-11-20T19:26:52,925 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:52,925 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:52,925 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:52,926 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:52,926 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:52,926 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:52,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:52,926 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/bacafbc4fa8544a9a6cf4c199183c7bf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d67b144e37bf453c9fb753ad919d6103, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0dbe77dc404e4a1ba62718e28f17bc7c] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=34.1 K 2024-11-20T19:26:52,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
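The re-dispatched pid=115 callable above is the one that finally gets to run: the earlier flush has completed, so instead of "NOT flushing ... as already flushing" the region begins flushing all three column families in the next record. The surrounding storm of RegionTooBusyException entries comes from HRegion.checkResources(), which rejects writes once the region's memstore exceeds its blocking limit (the flush size times hbase.hregion.memstore.block.multiplier; the 512.0 K figure here presumably reflects a deliberately small flush size in the test configuration). In ordinary client code these rejections are usually absorbed by the client's retry machinery; the sketch below is illustrative only, not anything from the test, and simply shows an explicit backoff in case the exception surfaces to application code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionBackoff {
      // Retry a single Put with exponential backoff if the region reports it is too busy.
      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        for (int attempt = 0; ; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= 5) throw e;        // give up after a handful of attempts
            Thread.sleep(100L << attempt);    // back off while the flush drains the memstore
          }
        }
      }

      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));  // row key taken from the log
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithBackoff(table, put);
        }
      }
    }

Whether the raw exception ever reaches application code depends on the client retry settings, so treat this as a conceptual illustration of the back-pressure visible in the log rather than required handling.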
2024-11-20T19:26:52,928 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:26:52,928 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting bacafbc4fa8544a9a6cf4c199183c7bf, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130809073 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:52,928 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d67b144e37bf453c9fb753ad919d6103, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732130810239 2024-11-20T19:26:52,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:52,930 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0dbe77dc404e4a1ba62718e28f17bc7c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130811413 2024-11-20T19:26:52,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742295_1471 (size=4469) 2024-11-20T19:26:52,950 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#393 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:52,951 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/dbc1080fbd324aa68fe3150022421f4a is 175, key is test_row_0/A:col10/1732130811413/Put/seqid=0 2024-11-20T19:26:52,963 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#395 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:52,963 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/72487fa661674e18b8bc2ecab402924c is 50, key is test_row_0/C:col10/1732130811413/Put/seqid=0 2024-11-20T19:26:52,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112025c173ae15bd4b4ebfb38df0a9bb5ed0_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130812632/Put/seqid=0 2024-11-20T19:26:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742296_1472 (size=32044) 2024-11-20T19:26:52,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742297_1473 (size=12454) 2024-11-20T19:26:52,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742298_1474 (size=12983) 2024-11-20T19:26:53,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:53,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130873033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130873044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130873044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130873045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130873045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:53,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130873149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130873156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130873157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130873157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130873159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130873362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130873365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130873365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130873365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130873371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:53,393 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112025c173ae15bd4b4ebfb38df0a9bb5ed0_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112025c173ae15bd4b4ebfb38df0a9bb5ed0_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:53,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/2b03e83cda6141e7a50bdf5a872bd2e8, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:53,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/2b03e83cda6141e7a50bdf5a872bd2e8 is 175, key is test_row_0/A:col10/1732130812632/Put/seqid=0 2024-11-20T19:26:53,399 DEBUG 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/dbc1080fbd324aa68fe3150022421f4a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/dbc1080fbd324aa68fe3150022421f4a 2024-11-20T19:26:53,399 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/72487fa661674e18b8bc2ecab402924c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/72487fa661674e18b8bc2ecab402924c 2024-11-20T19:26:53,405 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into dbc1080fbd324aa68fe3150022421f4a(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:53,405 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:53,405 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=13, startTime=1732130812856; duration=0sec 2024-11-20T19:26:53,405 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:53,405 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:53,406 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into 72487fa661674e18b8bc2ecab402924c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
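The wall of RegionTooBusyException entries above comes from HRegion.checkResources rejecting writes while this region's memstore sits above its blocking limit; writes succeed again once the in-flight flush (pid=115) drains the memstore. The stock HBase client already retries this exception internally, but for illustration, here is a minimal, hypothetical Java sketch of an explicit retry loop against the TestAcidGuarantees table. The table, row, and column names mirror the log; the retry budget and backoff are assumptions, not values taken from this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int maxAttempts = 5;   // assumed retry budget, not taken from the test
            long backoffMs = 200;  // assumed base backoff, not taken from the test
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    // May fail with RegionTooBusyException while the memstore is over its blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    if (attempt == maxAttempts) {
                        throw busy;  // give up after the last attempt
                    }
                    Thread.sleep(backoffMs * attempt);  // simple linear backoff before retrying
                }
            }
        }
    }
}
```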
2024-11-20T19:26:53,406 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:53,406 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=13, startTime=1732130812860; duration=0sec 2024-11-20T19:26:53,406 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:53,406 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:53,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742299_1475 (size=31255) 2024-11-20T19:26:53,477 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=313, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/2b03e83cda6141e7a50bdf5a872bd2e8 2024-11-20T19:26:53,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/7c0430e3f33346fa8f54cc4b28109991 is 50, key is test_row_0/B:col10/1732130812632/Put/seqid=0 2024-11-20T19:26:53,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742300_1476 (size=12301) 2024-11-20T19:26:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:53,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130873671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130873674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130873675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130873676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130873682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:53,922 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/7c0430e3f33346fa8f54cc4b28109991 2024-11-20T19:26:53,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/221d87424f464c379164b9d5c9a05411 is 50, key is test_row_0/C:col10/1732130812632/Put/seqid=0 2024-11-20T19:26:53,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742301_1477 (size=12301) 2024-11-20T19:26:54,192 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130874183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:54,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130874183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:54,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130874184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:54,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130874189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:54,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:54,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130874179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:54,354 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/221d87424f464c379164b9d5c9a05411 2024-11-20T19:26:54,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/2b03e83cda6141e7a50bdf5a872bd2e8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8 2024-11-20T19:26:54,362 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8, entries=150, sequenceid=313, filesize=30.5 K 2024-11-20T19:26:54,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/7c0430e3f33346fa8f54cc4b28109991 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7c0430e3f33346fa8f54cc4b28109991 2024-11-20T19:26:54,368 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7c0430e3f33346fa8f54cc4b28109991, entries=150, sequenceid=313, filesize=12.0 K 2024-11-20T19:26:54,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/221d87424f464c379164b9d5c9a05411 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/221d87424f464c379164b9d5c9a05411 2024-11-20T19:26:54,372 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/221d87424f464c379164b9d5c9a05411, entries=150, sequenceid=313, filesize=12.0 K 2024-11-20T19:26:54,374 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for fea4de996b388410b192fd1511bd28fa in 1445ms, sequenceid=313, compaction requested=false 2024-11-20T19:26:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
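The 512.0 K figure in those exceptions is the region's blocking memstore size: the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. Once the flush that just finished (about 120 KB flushed at sequenceid=313) pulls the memstore back under that threshold, checkResources lets mutations through again. The sketch below shows how such a small limit could be configured; the concrete values are assumptions chosen so that flush size times multiplier equals 512 K, not settings read from this log (the flush size may also come from the table descriptor).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: a 128 KB flush size with a multiplier of 4 yields the
        // 512 KB blocking limit reported as "Over memstore limit=512.0 K" in the log.
        // The test's real settings are not shown here.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;  // threshold enforced by HRegion.checkResources
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
    }
}
```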
2024-11-20T19:26:54,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-20T19:26:54,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-20T19:26:54,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-20T19:26:54,376 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9120 sec 2024-11-20T19:26:54,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.9170 sec 2024-11-20T19:26:54,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-20T19:26:54,568 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-20T19:26:54,569 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:54,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-20T19:26:54,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:26:54,571 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:54,573 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:54,573 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:54,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:26:54,724 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:54,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-20T19:26:54,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
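Procedure pids 114 through 117 above are the master-side view of client-requested table flushes: each HMaster "flush TestAcidGuarantees" call is stored as a FlushTableProcedure with a FlushRegionProcedure subprocedure, while the client polls "Checking to see if procedure is done" until it finishes. From the client, that whole exchange is a single Admin call; a minimal sketch, assuming a reachable cluster on the default configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Requests a flush of all memstores for the table; the master drives this as a
            // flush procedure and the call returns once that procedure completes.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```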
2024-11-20T19:26:54,725 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:26:54,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:54,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:54,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:54,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:54,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205d0fe7d967ac4630afd211c00f23a280_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130813029/Put/seqid=0 2024-11-20T19:26:54,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742302_1478 (size=12454) 2024-11-20T19:26:54,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:54,766 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205d0fe7d967ac4630afd211c00f23a280_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205d0fe7d967ac4630afd211c00f23a280_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:54,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a339a3e0350748dc89d778c8e4c3a96a, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:54,768 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a339a3e0350748dc89d778c8e4c3a96a is 175, key is test_row_0/A:col10/1732130813029/Put/seqid=0 2024-11-20T19:26:54,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742303_1479 (size=31255) 2024-11-20T19:26:54,791 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a339a3e0350748dc89d778c8e4c3a96a 2024-11-20T19:26:54,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/4aeffbecb5c74df9b0da602b9cdd7ed3 is 50, key is test_row_0/B:col10/1732130813029/Put/seqid=0 2024-11-20T19:26:54,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742304_1480 (size=12301) 2024-11-20T19:26:54,831 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/4aeffbecb5c74df9b0da602b9cdd7ed3 2024-11-20T19:26:54,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/ccd79bf9ccb8416c947efec8499b462b is 50, key is test_row_0/C:col10/1732130813029/Put/seqid=0 2024-11-20T19:26:54,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742305_1481 (size=12301) 2024-11-20T19:26:54,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:26:55,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:26:55,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:55,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:55,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130875230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130875236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130875237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130875240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130875240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,268 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/ccd79bf9ccb8416c947efec8499b462b 2024-11-20T19:26:55,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/a339a3e0350748dc89d778c8e4c3a96a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a 2024-11-20T19:26:55,295 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a, entries=150, sequenceid=330, filesize=30.5 K 2024-11-20T19:26:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/4aeffbecb5c74df9b0da602b9cdd7ed3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/4aeffbecb5c74df9b0da602b9cdd7ed3 2024-11-20T19:26:55,305 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/4aeffbecb5c74df9b0da602b9cdd7ed3, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T19:26:55,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/ccd79bf9ccb8416c947efec8499b462b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ccd79bf9ccb8416c947efec8499b462b 2024-11-20T19:26:55,320 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ccd79bf9ccb8416c947efec8499b462b, entries=150, sequenceid=330, filesize=12.0 K 2024-11-20T19:26:55,321 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for fea4de996b388410b192fd1511bd28fa in 596ms, sequenceid=330, compaction requested=true 2024-11-20T19:26:55,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:55,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:55,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-20T19:26:55,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-20T19:26:55,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-20T19:26:55,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 749 msec 2024-11-20T19:26:55,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 755 msec 2024-11-20T19:26:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:55,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:26:55,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:55,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:55,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:55,350 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d737fd63a0f64f26b9e197b71c2eed5f_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:55,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130875373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130875373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130875374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130875375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130875375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742306_1482 (size=17534) 2024-11-20T19:26:55,398 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:55,403 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d737fd63a0f64f26b9e197b71c2eed5f_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d737fd63a0f64f26b9e197b71c2eed5f_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:55,406 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d528567fe7364582bd0992c8a31154be, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:55,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d528567fe7364582bd0992c8a31154be is 175, key is test_row_0/A:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:55,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742307_1483 (size=48639) 2024-11-20T19:26:55,433 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=353, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d528567fe7364582bd0992c8a31154be 2024-11-20T19:26:55,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/001b783f95644469af459484a6cdbe8a is 50, key is 
test_row_0/B:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:55,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742308_1484 (size=12301) 2024-11-20T19:26:55,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/001b783f95644469af459484a6cdbe8a 2024-11-20T19:26:55,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130875488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130875488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,499 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130875491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130875491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130875492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/2936606a67734f80a83be1ec2af66c07 is 50, key is test_row_0/C:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:55,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742309_1485 (size=12301) 2024-11-20T19:26:55,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-20T19:26:55,675 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-20T19:26:55,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-20T19:26:55,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:26:55,677 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:55,678 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:55,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:55,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130875700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130875701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130875701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130875701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130875702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:26:55,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T19:26:55,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:55,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:55,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:55,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:55,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:55,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/2936606a67734f80a83be1ec2af66c07 2024-11-20T19:26:55,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/d528567fe7364582bd0992c8a31154be as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be 2024-11-20T19:26:55,966 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be, entries=250, sequenceid=353, filesize=47.5 K 2024-11-20T19:26:55,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/001b783f95644469af459484a6cdbe8a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/001b783f95644469af459484a6cdbe8a 2024-11-20T19:26:55,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/001b783f95644469af459484a6cdbe8a, entries=150, sequenceid=353, filesize=12.0 K 2024-11-20T19:26:55,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/2936606a67734f80a83be1ec2af66c07 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2936606a67734f80a83be1ec2af66c07 2024-11-20T19:26:55,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:26:55,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2936606a67734f80a83be1ec2af66c07, entries=150, sequenceid=353, filesize=12.0 K 2024-11-20T19:26:55,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for fea4de996b388410b192fd1511bd28fa in 631ms, sequenceid=353, compaction requested=true 2024-11-20T19:26:55,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:55,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:26:55,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:55,980 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:55,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:55,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:55,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:55,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:26:55,982 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:55,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-20T19:26:55,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:55,982 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:55,982 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:55,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:55,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:55,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:55,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:55,985 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143193 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:55,985 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:55,985 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:55,985 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:55,985 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/dbc1080fbd324aa68fe3150022421f4a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=139.8 K 2024-11-20T19:26:55,985 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:55,985 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:55,985 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:55,985 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/dbc1080fbd324aa68fe3150022421f4a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be] 2024-11-20T19:26:55,986 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/b12dde4018fe4eeba88885a165107456, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7c0430e3f33346fa8f54cc4b28109991, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/4aeffbecb5c74df9b0da602b9cdd7ed3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/001b783f95644469af459484a6cdbe8a] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=48.7 K 2024-11-20T19:26:55,986 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b12dde4018fe4eeba88885a165107456, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130810251 2024-11-20T19:26:55,986 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting dbc1080fbd324aa68fe3150022421f4a, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130810251 2024-11-20T19:26:55,987 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c0430e3f33346fa8f54cc4b28109991, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732130812632 2024-11-20T19:26:55,987 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b03e83cda6141e7a50bdf5a872bd2e8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732130812632 2024-11-20T19:26:55,990 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4aeffbecb5c74df9b0da602b9cdd7ed3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732130813029 2024-11-20T19:26:55,990 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a339a3e0350748dc89d778c8e4c3a96a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732130813029 2024-11-20T19:26:55,990 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 001b783f95644469af459484a6cdbe8a, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732130815239 2024-11-20T19:26:55,990 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d528567fe7364582bd0992c8a31154be, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732130815236 2024-11-20T19:26:55,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb00ed0cce11481aa41b3c1ef4037faa_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130815368/Put/seqid=0 2024-11-20T19:26:56,001 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,009 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#407 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:56,010 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/e34aa23aa69748b2b41e8920ea6a1872 is 50, key is test_row_0/B:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:56,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:56,015 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:56,030 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120d4a601f9477d4bd394e00d9818277232_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,033 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120d4a601f9477d4bd394e00d9818277232_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,033 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d4a601f9477d4bd394e00d9818277232_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130876044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130876057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742310_1486 (size=12454) 2024-11-20T19:26:56,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130876058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:56,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130876059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130876059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,078 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120fb00ed0cce11481aa41b3c1ef4037faa_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb00ed0cce11481aa41b3c1ef4037faa_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:56,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/93916c1e0e7749378c7bcb6d177b1449, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/93916c1e0e7749378c7bcb6d177b1449 is 175, key is test_row_0/A:col10/1732130815368/Put/seqid=0 2024-11-20T19:26:56,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742312_1488 (size=4469) 2024-11-20T19:26:56,105 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#406 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:56,105 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c2467e0b9d2d4972b51b69730c89cdd6 is 175, key is test_row_0/A:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:56,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742311_1487 (size=13119) 2024-11-20T19:26:56,132 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/e34aa23aa69748b2b41e8920ea6a1872 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/e34aa23aa69748b2b41e8920ea6a1872 2024-11-20T19:26:56,141 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into e34aa23aa69748b2b41e8920ea6a1872(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:56,141 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:56,141 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=12, startTime=1732130815980; duration=0sec 2024-11-20T19:26:56,141 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:56,141 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:56,141 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:26:56,143 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:26:56,143 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:56,143 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:56,144 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/72487fa661674e18b8bc2ecab402924c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/221d87424f464c379164b9d5c9a05411, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ccd79bf9ccb8416c947efec8499b462b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2936606a67734f80a83be1ec2af66c07] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=48.7 K 2024-11-20T19:26:56,144 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72487fa661674e18b8bc2ecab402924c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732130810251 2024-11-20T19:26:56,144 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 221d87424f464c379164b9d5c9a05411, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732130812632 2024-11-20T19:26:56,144 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ccd79bf9ccb8416c947efec8499b462b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732130813029 2024-11-20T19:26:56,145 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2936606a67734f80a83be1ec2af66c07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732130815239 2024-11-20T19:26:56,154 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#408 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:56,154 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/94220e850d404006869b3699f71d3b66 is 50, key is test_row_0/C:col10/1732130815239/Put/seqid=0 2024-11-20T19:26:56,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130876160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742313_1489 (size=31255) 2024-11-20T19:26:56,172 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/93916c1e0e7749378c7bcb6d177b1449 2024-11-20T19:26:56,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742314_1490 (size=32073) 2024-11-20T19:26:56,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130876173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130876174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130876174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130876174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,186 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/c2467e0b9d2d4972b51b69730c89cdd6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c2467e0b9d2d4972b51b69730c89cdd6 2024-11-20T19:26:56,191 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into c2467e0b9d2d4972b51b69730c89cdd6(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:56,191 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:56,191 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=12, startTime=1732130815980; duration=0sec 2024-11-20T19:26:56,191 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:56,191 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:56,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742315_1491 (size=13119) 2024-11-20T19:26:56,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/eca22a0f9aa04dc0966975e617b9d7b1 is 50, key is test_row_0/B:col10/1732130815368/Put/seqid=0 2024-11-20T19:26:56,222 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/94220e850d404006869b3699f71d3b66 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/94220e850d404006869b3699f71d3b66 2024-11-20T19:26:56,228 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into 94220e850d404006869b3699f71d3b66(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:56,228 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:56,228 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=12, startTime=1732130815980; duration=0sec 2024-11-20T19:26:56,229 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:56,229 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:56,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742316_1492 (size=12301) 2024-11-20T19:26:56,268 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/eca22a0f9aa04dc0966975e617b9d7b1 2024-11-20T19:26:56,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:26:56,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/51bd31a655a7455489b24ef543c38b3f is 50, key is test_row_0/C:col10/1732130815368/Put/seqid=0 2024-11-20T19:26:56,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742317_1493 (size=12301) 2024-11-20T19:26:56,325 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/51bd31a655a7455489b24ef543c38b3f 2024-11-20T19:26:56,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/93916c1e0e7749378c7bcb6d177b1449 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449 2024-11-20T19:26:56,336 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449, entries=150, sequenceid=366, filesize=30.5 K 2024-11-20T19:26:56,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/eca22a0f9aa04dc0966975e617b9d7b1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/eca22a0f9aa04dc0966975e617b9d7b1 2024-11-20T19:26:56,344 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/eca22a0f9aa04dc0966975e617b9d7b1, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T19:26:56,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/51bd31a655a7455489b24ef543c38b3f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/51bd31a655a7455489b24ef543c38b3f 2024-11-20T19:26:56,348 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/51bd31a655a7455489b24ef543c38b3f, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T19:26:56,349 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fea4de996b388410b192fd1511bd28fa in 367ms, sequenceid=366, compaction requested=false 2024-11-20T19:26:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-20T19:26:56,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-20T19:26:56,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-20T19:26:56,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 672 msec 2024-11-20T19:26:56,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 676 msec 2024-11-20T19:26:56,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:56,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:26:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:56,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:56,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205cafd5d058004cec89975138710807c1_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:56,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130876389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130876396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130876396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130876397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130876397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742318_1494 (size=14994) 2024-11-20T19:26:56,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130876498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130876509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130876511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130876511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130876511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39792 deadline: 1732130876703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39832 deadline: 1732130876713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39818 deadline: 1732130876713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39808 deadline: 1732130876714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:26:56,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39810 deadline: 1732130876714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-20T19:26:56,780 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-20T19:26:56,781 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:26:56,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-11-20T19:26:56,782 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:26:56,783 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:26:56,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T19:26:56,783 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:26:56,828 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:56,833 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205cafd5d058004cec89975138710807c1_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205cafd5d058004cec89975138710807c1_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:56,835 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/4c0ba4d6523d4c63932ab6c9cf675954, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,835 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/4c0ba4d6523d4c63932ab6c9cf675954 is 175, key is test_row_0/A:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:56,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742319_1495 (size=39949) 2024-11-20T19:26:56,845 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=393, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/4c0ba4d6523d4c63932ab6c9cf675954 2024-11-20T19:26:56,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/0232519362b64271b7e06afa3ae39852 is 50, key is test_row_0/B:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:56,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T19:26:56,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742320_1496 (size=12301) 2024-11-20T19:26:56,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/0232519362b64271b7e06afa3ae39852 2024-11-20T19:26:56,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/589cbc436e5e4151bbfd86858205cf6e is 50, key is test_row_0/C:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:56,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:56,934 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:56,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:56,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:56,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:56,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:56,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
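The repeated RegionTooBusyException warnings earlier in this window ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore data size is above its blocking threshold; the client treats this as a retriable condition, so the puts go through once the in-flight flush drains the memstore. The blocking threshold is the region flush size multiplied by the block multiplier. A hedged configuration sketch follows; the concrete values are assumptions chosen only so the product matches the 512 K figure in the log, not the settings this test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  static Configuration lowMemstoreLimitConf() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking threshold = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
    // With the illustrative values below that is 128 K * 4 = 512 K, matching the
    // "Over memstore limit=512.0 K" message in the warnings above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}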
2024-11-20T19:26:56,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:56,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742321_1497 (size=12301) 2024-11-20T19:26:56,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/589cbc436e5e4151bbfd86858205cf6e 2024-11-20T19:26:56,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/4c0ba4d6523d4c63932ab6c9cf675954 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954 2024-11-20T19:26:56,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954, entries=200, sequenceid=393, filesize=39.0 K 2024-11-20T19:26:56,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/0232519362b64271b7e06afa3ae39852 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0232519362b64271b7e06afa3ae39852 2024-11-20T19:26:56,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0232519362b64271b7e06afa3ae39852, entries=150, sequenceid=393, filesize=12.0 K 2024-11-20T19:26:56,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/589cbc436e5e4151bbfd86858205cf6e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/589cbc436e5e4151bbfd86858205cf6e 2024-11-20T19:26:56,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/589cbc436e5e4151bbfd86858205cf6e, entries=150, sequenceid=393, filesize=12.0 K 2024-11-20T19:26:56,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fea4de996b388410b192fd1511bd28fa in 589ms, sequenceid=393, compaction requested=true 2024-11-20T19:26:56,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:56,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:56,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:56,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:26:56,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:56,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fea4de996b388410b192fd1511bd28fa:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:26:56,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/A is initiating minor compaction (all files) 2024-11-20T19:26:56,962 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/A in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
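After the flush, the CompactSplit entries above queue stores A, B and C of the region for compaction, and SortedCompactionPolicy selects three eligible store files per store for a minor compaction. Compactions are normally triggered by the region server itself, as here, but a client can also request one through the Admin API. A minimal sketch under the same assumptions as before (table name from the log, everything else illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Queues a minor compaction for every region/store of the table; the region
      // server's compaction policy still decides which files to include, as in the
      // ExploringCompactionPolicy selections logged above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
      // admin.majorCompact(TableName.valueOf("TestAcidGuarantees")) would instead
      // request rewriting all store files of the table.
    }
  }
}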
2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/B is initiating minor compaction (all files) 2024-11-20T19:26:56,962 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c2467e0b9d2d4972b51b69730c89cdd6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=100.9 K 2024-11-20T19:26:56,962 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/B in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:56,962 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:56,962 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/e34aa23aa69748b2b41e8920ea6a1872, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/eca22a0f9aa04dc0966975e617b9d7b1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0232519362b64271b7e06afa3ae39852] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=36.8 K 2024-11-20T19:26:56,962 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c2467e0b9d2d4972b51b69730c89cdd6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954] 2024-11-20T19:26:56,963 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2467e0b9d2d4972b51b69730c89cdd6, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732130815239 2024-11-20T19:26:56,963 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e34aa23aa69748b2b41e8920ea6a1872, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732130815239 2024-11-20T19:26:56,963 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93916c1e0e7749378c7bcb6d177b1449, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732130815368 2024-11-20T19:26:56,963 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting eca22a0f9aa04dc0966975e617b9d7b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732130815368 2024-11-20T19:26:56,963 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c0ba4d6523d4c63932ab6c9cf675954, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1732130816055 2024-11-20T19:26:56,964 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0232519362b64271b7e06afa3ae39852, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1732130816057 2024-11-20T19:26:56,972 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,985 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#B#compaction#415 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:56,985 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/588696bd29e6406d91d37807b9d066bf is 50, key is test_row_0/B:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:56,986 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112048808be33be344febc6256bb5e981402_fea4de996b388410b192fd1511bd28fa store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,989 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112048808be33be344febc6256bb5e981402_fea4de996b388410b192fd1511bd28fa, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,989 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112048808be33be344febc6256bb5e981402_fea4de996b388410b192fd1511bd28fa because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:56,989 DEBUG [Thread-1751 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:50476 2024-11-20T19:26:56,989 DEBUG [Thread-1751 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:56,991 DEBUG [Thread-1755 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787e5169 to 127.0.0.1:50476 2024-11-20T19:26:56,991 DEBUG [Thread-1755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:56,995 DEBUG [Thread-1753 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:50476 2024-11-20T19:26:56,995 DEBUG [Thread-1753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:56,996 DEBUG [Thread-1747 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:50476 2024-11-20T19:26:56,996 DEBUG [Thread-1747 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:56,996 DEBUG [Thread-1749 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2205f666 to 127.0.0.1:50476 2024-11-20T19:26:56,996 DEBUG [Thread-1749 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:57,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742322_1498 (size=13221) 2024-11-20T19:26:57,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742323_1499 (size=4469) 2024-11-20T19:26:57,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:57,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:26:57,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:57,014 DEBUG [Thread-1738 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f422b4 to 127.0.0.1:50476 2024-11-20T19:26:57,014 DEBUG [Thread-1738 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:57,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:57,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:57,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:57,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:57,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:57,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204dff0a9927974b3b98d13345e3c24987_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130817013/Put/seqid=0 2024-11-20T19:26:57,022 DEBUG [Thread-1740 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:50476 2024-11-20T19:26:57,022 DEBUG [Thread-1740 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:57,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742324_1500 (size=12454) 2024-11-20T19:26:57,023 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:57,023 DEBUG [Thread-1744 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:50476 2024-11-20T19:26:57,023 DEBUG [Thread-1744 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:57,024 DEBUG [Thread-1742 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:50476 2024-11-20T19:26:57,024 DEBUG [Thread-1736 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3637e4c6 to 127.0.0.1:50476 2024-11-20T19:26:57,024 DEBUG [Thread-1742 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:57,024 DEBUG [Thread-1736 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:26:57,026 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411204dff0a9927974b3b98d13345e3c24987_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204dff0a9927974b3b98d13345e3c24987_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:57,027 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/ac1b2b9d2980406a9fb499777b0be262, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:57,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/ac1b2b9d2980406a9fb499777b0be262 is 175, key is test_row_0/A:col10/1732130817013/Put/seqid=0 2024-11-20T19:26:57,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742325_1501 (size=31255) 2024-11-20T19:26:57,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T19:26:57,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:57,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:57,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:57,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:57,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:57,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:26:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T19:26:57,391 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:57,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:57,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:57,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,407 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#A#compaction#414 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:57,407 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/588696bd29e6406d91d37807b9d066bf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/588696bd29e6406d91d37807b9d066bf 2024-11-20T19:26:57,407 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/ce279d5a4f2c40649c3d92431ad02f97 is 175, key is test_row_0/A:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:57,410 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/B of fea4de996b388410b192fd1511bd28fa into 588696bd29e6406d91d37807b9d066bf(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:26:57,410 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:57,410 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/B, priority=13, startTime=1732130816962; duration=0sec 2024-11-20T19:26:57,410 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:26:57,410 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:B 2024-11-20T19:26:57,411 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:26:57,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742326_1502 (size=32175) 2024-11-20T19:26:57,412 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:26:57,412 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): fea4de996b388410b192fd1511bd28fa/C is initiating minor compaction (all files) 2024-11-20T19:26:57,412 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fea4de996b388410b192fd1511bd28fa/C in TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:57,412 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/94220e850d404006869b3699f71d3b66, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/51bd31a655a7455489b24ef543c38b3f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/589cbc436e5e4151bbfd86858205cf6e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp, totalSize=36.8 K 2024-11-20T19:26:57,413 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 94220e850d404006869b3699f71d3b66, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1732130815239 2024-11-20T19:26:57,414 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 51bd31a655a7455489b24ef543c38b3f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732130815368 2024-11-20T19:26:57,414 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 589cbc436e5e4151bbfd86858205cf6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1732130816057 2024-11-20T19:26:57,416 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/ce279d5a4f2c40649c3d92431ad02f97 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ce279d5a4f2c40649c3d92431ad02f97 2024-11-20T19:26:57,420 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/A of fea4de996b388410b192fd1511bd28fa into ce279d5a4f2c40649c3d92431ad02f97(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:57,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:57,420 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/A, priority=13, startTime=1732130816961; duration=0sec 2024-11-20T19:26:57,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:57,420 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:A 2024-11-20T19:26:57,420 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fea4de996b388410b192fd1511bd28fa#C#compaction#417 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:26:57,421 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/0c8626d2acd748f98389f1df56ccb561 is 50, key is test_row_0/C:col10/1732130816057/Put/seqid=0 2024-11-20T19:26:57,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742327_1503 (size=13221) 2024-11-20T19:26:57,427 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/0c8626d2acd748f98389f1df56ccb561 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0c8626d2acd748f98389f1df56ccb561 2024-11-20T19:26:57,430 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fea4de996b388410b192fd1511bd28fa/C of fea4de996b388410b192fd1511bd28fa into 0c8626d2acd748f98389f1df56ccb561(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:26:57,430 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:57,430 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa., storeName=fea4de996b388410b192fd1511bd28fa/C, priority=13, startTime=1732130816962; duration=0sec 2024-11-20T19:26:57,430 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:26:57,430 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fea4de996b388410b192fd1511bd28fa:C 2024-11-20T19:26:57,432 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=406, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/ac1b2b9d2980406a9fb499777b0be262 2024-11-20T19:26:57,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1b8441caeabf40e4b69bd19fb5c4e3db is 50, key is test_row_0/B:col10/1732130817013/Put/seqid=0 2024-11-20T19:26:57,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742328_1504 (size=12301) 2024-11-20T19:26:57,543 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:57,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:57,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,544 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,695 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:57,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:57,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:57,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1b8441caeabf40e4b69bd19fb5c4e3db 2024-11-20T19:26:57,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/b1ff17c605e04d06880a9da6e86c2588 is 50, key is test_row_0/C:col10/1732130817013/Put/seqid=0 2024-11-20T19:26:57,848 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:57,849 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:57,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742329_1505 (size=12301) 2024-11-20T19:26:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
as already flushing 2024-11-20T19:26:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:57,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T19:26:58,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:58,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:58,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:58,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:58,152 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:58,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:58,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:58,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. as already flushing 2024-11-20T19:26:58,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:26:58,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:58,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:58,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:26:58,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/b1ff17c605e04d06880a9da6e86c2588 2024-11-20T19:26:58,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/ac1b2b9d2980406a9fb499777b0be262 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ac1b2b9d2980406a9fb499777b0be262 2024-11-20T19:26:58,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ac1b2b9d2980406a9fb499777b0be262, entries=150, sequenceid=406, filesize=30.5 K 2024-11-20T19:26:58,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/1b8441caeabf40e4b69bd19fb5c4e3db as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1b8441caeabf40e4b69bd19fb5c4e3db 2024-11-20T19:26:58,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1b8441caeabf40e4b69bd19fb5c4e3db, entries=150, 
sequenceid=406, filesize=12.0 K 2024-11-20T19:26:58,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/b1ff17c605e04d06880a9da6e86c2588 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/b1ff17c605e04d06880a9da6e86c2588 2024-11-20T19:26:58,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/b1ff17c605e04d06880a9da6e86c2588, entries=150, sequenceid=406, filesize=12.0 K 2024-11-20T19:26:58,261 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=26.84 KB/27480 for fea4de996b388410b192fd1511bd28fa in 1248ms, sequenceid=406, compaction requested=false 2024-11-20T19:26:58,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:58,305 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:26:58,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-20T19:26:58,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
2024-11-20T19:26:58,305 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing fea4de996b388410b192fd1511bd28fa 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T19:26:58,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=A 2024-11-20T19:26:58,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:58,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=B 2024-11-20T19:26:58,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:58,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fea4de996b388410b192fd1511bd28fa, store=C 2024-11-20T19:26:58,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:26:58,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203d27420caaa742d39d07f8f51f5b6007_fea4de996b388410b192fd1511bd28fa is 50, key is test_row_0/A:col10/1732130817023/Put/seqid=0 2024-11-20T19:26:58,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742330_1506 (size=12454) 2024-11-20T19:26:58,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:26:58,716 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203d27420caaa742d39d07f8f51f5b6007_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203d27420caaa742d39d07f8f51f5b6007_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:26:58,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/87a419a523474a0faa0a994f0fb62a44, store: [table=TestAcidGuarantees family=A region=fea4de996b388410b192fd1511bd28fa] 2024-11-20T19:26:58,717 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/87a419a523474a0faa0a994f0fb62a44 is 175, key is test_row_0/A:col10/1732130817023/Put/seqid=0 2024-11-20T19:26:58,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742331_1507 (size=31255) 2024-11-20T19:26:58,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-20T19:26:59,121 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=416, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/87a419a523474a0faa0a994f0fb62a44 2024-11-20T19:26:59,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/6aac7fdcc68f43b3a57b1c66606acf5b is 50, key is test_row_0/B:col10/1732130817023/Put/seqid=0 2024-11-20T19:26:59,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742332_1508 (size=12301) 2024-11-20T19:26:59,529 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/6aac7fdcc68f43b3a57b1c66606acf5b 2024-11-20T19:26:59,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/9d3eae136c4d47fea89146a272bf5994 is 50, key is test_row_0/C:col10/1732130817023/Put/seqid=0 2024-11-20T19:26:59,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742333_1509 (size=12301) 2024-11-20T19:26:59,938 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/9d3eae136c4d47fea89146a272bf5994 2024-11-20T19:26:59,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/A/87a419a523474a0faa0a994f0fb62a44 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/87a419a523474a0faa0a994f0fb62a44 2024-11-20T19:26:59,943 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/87a419a523474a0faa0a994f0fb62a44, entries=150, sequenceid=416, filesize=30.5 K 2024-11-20T19:26:59,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/B/6aac7fdcc68f43b3a57b1c66606acf5b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/6aac7fdcc68f43b3a57b1c66606acf5b 2024-11-20T19:26:59,946 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/6aac7fdcc68f43b3a57b1c66606acf5b, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T19:26:59,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/.tmp/C/9d3eae136c4d47fea89146a272bf5994 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9d3eae136c4d47fea89146a272bf5994 2024-11-20T19:26:59,949 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9d3eae136c4d47fea89146a272bf5994, entries=150, sequenceid=416, filesize=12.0 K 2024-11-20T19:26:59,949 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for fea4de996b388410b192fd1511bd28fa in 1644ms, sequenceid=416, compaction requested=true 2024-11-20T19:26:59,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:26:59,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 
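The flush completion and table disable recorded in the entries below are both driven from the test client through the public HBase Admin API. A minimal client-side sketch in Java of issuing the same two operations against a running cluster follows; only the table name is taken from this log, while the configuration lookup and the class/method names are the standard hbase-client ones, shown for illustration rather than as the test tool's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndDisableExample {
    public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath (ZooKeeper quorum, etc.).
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Synchronous flush of all column families; on the master this runs as a
            // FlushTableProcedure with per-region FlushRegionProcedure subprocedures,
            // like pid=120/pid=121 in the surrounding log.
            admin.flush(table);
            // Disabling the table closes its regions, which is what triggers the
            // store-file archiving seen further down in the log.
            admin.disableTable(table);
        }
    }
}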
2024-11-20T19:26:59,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121
2024-11-20T19:26:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=121
2024-11-20T19:26:59,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120
2024-11-20T19:26:59,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1670 sec
2024-11-20T19:26:59,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 3.1710 sec
2024-11-20T19:27:00,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120
2024-11-20T19:27:00,888 INFO [Thread-1746 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 80
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 46
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1650
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4950 rows
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1652
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4956 rows
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1635
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4905 rows
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1653
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4959 rows
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1638
2024-11-20T19:27:00,888 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4914 rows
2024-11-20T19:27:00,888 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-20T19:27:00,888 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df61dc9 to 127.0.0.1:50476
2024-11-20T19:27:00,888 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-20T19:27:00,889 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of
TestAcidGuarantees 2024-11-20T19:27:00,890 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:27:00,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:00,893 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130820893"}]},"ts":"1732130820893"} 2024-11-20T19:27:00,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T19:27:00,895 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:27:00,941 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:27:00,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:00,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, UNASSIGN}] 2024-11-20T19:27:00,944 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, UNASSIGN 2024-11-20T19:27:00,944 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:00,945 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:00,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:00,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T19:27:01,096 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:01,097 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,097 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:01,097 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing fea4de996b388410b192fd1511bd28fa, disabling compactions & flushes 2024-11-20T19:27:01,097 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:27:01,097 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:27:01,097 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. after waiting 0 ms 2024-11-20T19:27:01,097 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:27:01,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/0b415765e8a9485cb97b3385bc5ac1fd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d15ce8b2fb7f4951a1bd0b7dd356786c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/acc179ed329541c18707595c00310494, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f978ebb7748143d0ac152ccf6b6abe70, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cdc88d0d8bf24ae38b992c9c7fb07289, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/dbc1080fbd324aa68fe3150022421f4a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c2467e0b9d2d4972b51b69730c89cdd6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954] to archive 2024-11-20T19:27:01,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:01,099 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/8289b3c96c454d81bfbb1fe806c7a026 2024-11-20T19:27:01,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/64e1d715b2a64b4c857dd35300bf0468 2024-11-20T19:27:01,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c8e1b132fd874ba087248d32f959688b 2024-11-20T19:27:01,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/0b415765e8a9485cb97b3385bc5ac1fd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/0b415765e8a9485cb97b3385bc5ac1fd 2024-11-20T19:27:01,103 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f396d3c88a1c471bbce12f73f56991e4 2024-11-20T19:27:01,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f8cea79454954cc189dc588be536078b 2024-11-20T19:27:01,105 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d15ce8b2fb7f4951a1bd0b7dd356786c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d15ce8b2fb7f4951a1bd0b7dd356786c 2024-11-20T19:27:01,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cf365753500a44e0894f7a423b730d66 2024-11-20T19:27:01,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a7b44f8407c34845ad23b46dc8e81807 2024-11-20T19:27:01,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c9873e94cfc142fb99b30cb6d897c4d6 2024-11-20T19:27:01,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/9b4ba5ff8d89444f8c4e66602789601e 2024-11-20T19:27:01,108 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/acc179ed329541c18707595c00310494 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/acc179ed329541c18707595c00310494 2024-11-20T19:27:01,109 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/30d602d8eb48483f9d0ff0e8143f6e29 2024-11-20T19:27:01,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/6f9ec4134ac847e0ad68e5b440408355 2024-11-20T19:27:01,110 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f978ebb7748143d0ac152ccf6b6abe70 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/f978ebb7748143d0ac152ccf6b6abe70 2024-11-20T19:27:01,111 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d0cd60554da74c03a96f54615515ba52 2024-11-20T19:27:01,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/19442f20e8384f91909ead524fe39f39 2024-11-20T19:27:01,112 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cdc88d0d8bf24ae38b992c9c7fb07289 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/cdc88d0d8bf24ae38b992c9c7fb07289 2024-11-20T19:27:01,113 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/7724c1220eb94b8fad3677045859bb6e 2024-11-20T19:27:01,114 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/dbc1080fbd324aa68fe3150022421f4a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/dbc1080fbd324aa68fe3150022421f4a 2024-11-20T19:27:01,115 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/b7e5a606e31340ffbe9563597fc2b429 2024-11-20T19:27:01,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/2b03e83cda6141e7a50bdf5a872bd2e8 2024-11-20T19:27:01,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/a339a3e0350748dc89d778c8e4c3a96a 2024-11-20T19:27:01,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/d528567fe7364582bd0992c8a31154be 2024-11-20T19:27:01,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c2467e0b9d2d4972b51b69730c89cdd6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/c2467e0b9d2d4972b51b69730c89cdd6 2024-11-20T19:27:01,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/93916c1e0e7749378c7bcb6d177b1449 2024-11-20T19:27:01,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/4c0ba4d6523d4c63932ab6c9cf675954 2024-11-20T19:27:01,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1e827f2dfecb49eaacaa1ef2dc970786, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/74c2b6bc48bf41498e12d917cf7eb4ac, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/41e1fe1594f845bc91334a2c01cfe9ad, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9e3a00a20b2d493d98bfc728013ae654, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8f6653cb4d774e7cbf57d04b0c36258b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8b111285a92b40be84016a7bf68824b2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/f4c85bab2d5f4c839afeb40d0e43b1de, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/693b25271063405fa697fd4c3bbd1242, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0b6622d32c8940c2a0b357cca45f1794, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/d621885a67f745e6ad5ab668ced4a843, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/82b65e4e49624deeb0c85c42bf50f769, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9a43ae8d8490409ea5fb7a67e0324809, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/69cfaa2822b14966b03ff6d80f810c0d, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/a588755a63004b5e8d3ab486d3118c5c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ca8cf4f4591d467ca9c3fc0788829728, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/104fd00d6cf6464c9b7c48781a0482e6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1431b7e07a1540598d8bc7e5f5d59aa2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/fb3373cd662c49f59a4b71dc1a9357fb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7dc631225b6e4181a1ab617741797425, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/b12dde4018fe4eeba88885a165107456, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ed5a1144df3c4032bb27381d2b2a2e97, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7c0430e3f33346fa8f54cc4b28109991, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/4aeffbecb5c74df9b0da602b9cdd7ed3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/e34aa23aa69748b2b41e8920ea6a1872, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/001b783f95644469af459484a6cdbe8a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/eca22a0f9aa04dc0966975e617b9d7b1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0232519362b64271b7e06afa3ae39852] to archive 2024-11-20T19:27:01,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:01,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1e827f2dfecb49eaacaa1ef2dc970786 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1e827f2dfecb49eaacaa1ef2dc970786 2024-11-20T19:27:01,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/74c2b6bc48bf41498e12d917cf7eb4ac to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/74c2b6bc48bf41498e12d917cf7eb4ac 2024-11-20T19:27:01,123 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/41e1fe1594f845bc91334a2c01cfe9ad to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/41e1fe1594f845bc91334a2c01cfe9ad 2024-11-20T19:27:01,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9e3a00a20b2d493d98bfc728013ae654 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9e3a00a20b2d493d98bfc728013ae654 2024-11-20T19:27:01,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8f6653cb4d774e7cbf57d04b0c36258b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8f6653cb4d774e7cbf57d04b0c36258b 2024-11-20T19:27:01,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8b111285a92b40be84016a7bf68824b2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/8b111285a92b40be84016a7bf68824b2 2024-11-20T19:27:01,128 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/f4c85bab2d5f4c839afeb40d0e43b1de to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/f4c85bab2d5f4c839afeb40d0e43b1de 2024-11-20T19:27:01,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/693b25271063405fa697fd4c3bbd1242 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/693b25271063405fa697fd4c3bbd1242 2024-11-20T19:27:01,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0b6622d32c8940c2a0b357cca45f1794 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0b6622d32c8940c2a0b357cca45f1794 2024-11-20T19:27:01,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/d621885a67f745e6ad5ab668ced4a843 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/d621885a67f745e6ad5ab668ced4a843 2024-11-20T19:27:01,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/82b65e4e49624deeb0c85c42bf50f769 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/82b65e4e49624deeb0c85c42bf50f769 2024-11-20T19:27:01,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9a43ae8d8490409ea5fb7a67e0324809 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/9a43ae8d8490409ea5fb7a67e0324809 2024-11-20T19:27:01,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/69cfaa2822b14966b03ff6d80f810c0d to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/69cfaa2822b14966b03ff6d80f810c0d 2024-11-20T19:27:01,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/a588755a63004b5e8d3ab486d3118c5c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/a588755a63004b5e8d3ab486d3118c5c 2024-11-20T19:27:01,134 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ca8cf4f4591d467ca9c3fc0788829728 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ca8cf4f4591d467ca9c3fc0788829728 2024-11-20T19:27:01,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/104fd00d6cf6464c9b7c48781a0482e6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/104fd00d6cf6464c9b7c48781a0482e6 2024-11-20T19:27:01,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1431b7e07a1540598d8bc7e5f5d59aa2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1431b7e07a1540598d8bc7e5f5d59aa2 2024-11-20T19:27:01,137 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/fb3373cd662c49f59a4b71dc1a9357fb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/fb3373cd662c49f59a4b71dc1a9357fb 2024-11-20T19:27:01,139 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7dc631225b6e4181a1ab617741797425 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7dc631225b6e4181a1ab617741797425 2024-11-20T19:27:01,139 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/b12dde4018fe4eeba88885a165107456 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/b12dde4018fe4eeba88885a165107456 2024-11-20T19:27:01,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ed5a1144df3c4032bb27381d2b2a2e97 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/ed5a1144df3c4032bb27381d2b2a2e97 2024-11-20T19:27:01,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7c0430e3f33346fa8f54cc4b28109991 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/7c0430e3f33346fa8f54cc4b28109991 2024-11-20T19:27:01,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/4aeffbecb5c74df9b0da602b9cdd7ed3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/4aeffbecb5c74df9b0da602b9cdd7ed3 2024-11-20T19:27:01,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/e34aa23aa69748b2b41e8920ea6a1872 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/e34aa23aa69748b2b41e8920ea6a1872 2024-11-20T19:27:01,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/001b783f95644469af459484a6cdbe8a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/001b783f95644469af459484a6cdbe8a 2024-11-20T19:27:01,145 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/eca22a0f9aa04dc0966975e617b9d7b1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/eca22a0f9aa04dc0966975e617b9d7b1 2024-11-20T19:27:01,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0232519362b64271b7e06afa3ae39852 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/0232519362b64271b7e06afa3ae39852 2024-11-20T19:27:01,147 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/98e07125fddb48cfaed880a446739450, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/00c32907460e4b9a9e4040ec9521daa4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c0d873304ab04c13a1020b0c18e95e50, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/eefd6d69572c4090af99b5b701a9f2d0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9bba6115a6ec46cc819f34f12de7991c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/e6a4bb766b0e4debb7353c15a15c9286, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/34b05360040e42b794b823f7b88be3ec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/cc6d12b5528a4be2a87fba1282558282, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d75ce22f6cb54b2589ca7038d312bb8e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/84e0f7404b7a40dcacd05bf5949fc9a5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/8c20c3b0a8a64109ad6f25e6a94e2580, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ae764e832f444ff38e2afcb89624404e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2d9abc75c9a6498c81390cd03515df65, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/70903dd3ed8f40619b66d50442c51e5d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c546d0d71d3c4bda9b99299c4ff5f0eb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c07b16db976e4e9893196e2582f3bcef, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/bacafbc4fa8544a9a6cf4c199183c7bf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c131f3dc88a54df5bdd3858c768522e7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d67b144e37bf453c9fb753ad919d6103, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/72487fa661674e18b8bc2ecab402924c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0dbe77dc404e4a1ba62718e28f17bc7c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/221d87424f464c379164b9d5c9a05411, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ccd79bf9ccb8416c947efec8499b462b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/94220e850d404006869b3699f71d3b66, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2936606a67734f80a83be1ec2af66c07, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/51bd31a655a7455489b24ef543c38b3f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/589cbc436e5e4151bbfd86858205cf6e] to archive 2024-11-20T19:27:01,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:01,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/98e07125fddb48cfaed880a446739450 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/98e07125fddb48cfaed880a446739450 2024-11-20T19:27:01,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/00c32907460e4b9a9e4040ec9521daa4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/00c32907460e4b9a9e4040ec9521daa4 2024-11-20T19:27:01,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c0d873304ab04c13a1020b0c18e95e50 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c0d873304ab04c13a1020b0c18e95e50 2024-11-20T19:27:01,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/eefd6d69572c4090af99b5b701a9f2d0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/eefd6d69572c4090af99b5b701a9f2d0 2024-11-20T19:27:01,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9bba6115a6ec46cc819f34f12de7991c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9bba6115a6ec46cc819f34f12de7991c 2024-11-20T19:27:01,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/e6a4bb766b0e4debb7353c15a15c9286 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/e6a4bb766b0e4debb7353c15a15c9286 2024-11-20T19:27:01,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/34b05360040e42b794b823f7b88be3ec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/34b05360040e42b794b823f7b88be3ec 2024-11-20T19:27:01,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/cc6d12b5528a4be2a87fba1282558282 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/cc6d12b5528a4be2a87fba1282558282 2024-11-20T19:27:01,156 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d75ce22f6cb54b2589ca7038d312bb8e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d75ce22f6cb54b2589ca7038d312bb8e 2024-11-20T19:27:01,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/84e0f7404b7a40dcacd05bf5949fc9a5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/84e0f7404b7a40dcacd05bf5949fc9a5 2024-11-20T19:27:01,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/8c20c3b0a8a64109ad6f25e6a94e2580 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/8c20c3b0a8a64109ad6f25e6a94e2580 2024-11-20T19:27:01,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ae764e832f444ff38e2afcb89624404e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ae764e832f444ff38e2afcb89624404e 2024-11-20T19:27:01,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2d9abc75c9a6498c81390cd03515df65 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2d9abc75c9a6498c81390cd03515df65 2024-11-20T19:27:01,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/70903dd3ed8f40619b66d50442c51e5d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/70903dd3ed8f40619b66d50442c51e5d 2024-11-20T19:27:01,160 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c546d0d71d3c4bda9b99299c4ff5f0eb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c546d0d71d3c4bda9b99299c4ff5f0eb 2024-11-20T19:27:01,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c07b16db976e4e9893196e2582f3bcef to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c07b16db976e4e9893196e2582f3bcef 2024-11-20T19:27:01,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/bacafbc4fa8544a9a6cf4c199183c7bf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/bacafbc4fa8544a9a6cf4c199183c7bf 2024-11-20T19:27:01,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c131f3dc88a54df5bdd3858c768522e7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/c131f3dc88a54df5bdd3858c768522e7 2024-11-20T19:27:01,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d67b144e37bf453c9fb753ad919d6103 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/d67b144e37bf453c9fb753ad919d6103 2024-11-20T19:27:01,165 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/72487fa661674e18b8bc2ecab402924c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/72487fa661674e18b8bc2ecab402924c 2024-11-20T19:27:01,166 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0dbe77dc404e4a1ba62718e28f17bc7c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0dbe77dc404e4a1ba62718e28f17bc7c 2024-11-20T19:27:01,167 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/221d87424f464c379164b9d5c9a05411 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/221d87424f464c379164b9d5c9a05411 2024-11-20T19:27:01,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ccd79bf9ccb8416c947efec8499b462b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/ccd79bf9ccb8416c947efec8499b462b 2024-11-20T19:27:01,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/94220e850d404006869b3699f71d3b66 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/94220e850d404006869b3699f71d3b66 2024-11-20T19:27:01,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2936606a67734f80a83be1ec2af66c07 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/2936606a67734f80a83be1ec2af66c07 2024-11-20T19:27:01,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/51bd31a655a7455489b24ef543c38b3f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/51bd31a655a7455489b24ef543c38b3f 2024-11-20T19:27:01,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/589cbc436e5e4151bbfd86858205cf6e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/589cbc436e5e4151bbfd86858205cf6e 2024-11-20T19:27:01,176 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/recovered.edits/419.seqid, newMaxSeqId=419, maxSeqId=4 2024-11-20T19:27:01,176 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa. 2024-11-20T19:27:01,176 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for fea4de996b388410b192fd1511bd28fa: 2024-11-20T19:27:01,177 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,178 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=fea4de996b388410b192fd1511bd28fa, regionState=CLOSED 2024-11-20T19:27:01,179 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-20T19:27:01,179 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure fea4de996b388410b192fd1511bd28fa, server=db9c3a6c6492,35979,1732130703276 in 233 msec 2024-11-20T19:27:01,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-20T19:27:01,180 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fea4de996b388410b192fd1511bd28fa, UNASSIGN in 236 msec 2024-11-20T19:27:01,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-20T19:27:01,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 238 msec 2024-11-20T19:27:01,182 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130821182"}]},"ts":"1732130821182"} 2024-11-20T19:27:01,182 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:27:01,191 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:27:01,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 302 msec 2024-11-20T19:27:01,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-20T19:27:01,196 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-20T19:27:01,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:27:01,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,197 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:01,198 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,199 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,201 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/recovered.edits] 2024-11-20T19:27:01,203 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/87a419a523474a0faa0a994f0fb62a44 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/87a419a523474a0faa0a994f0fb62a44 2024-11-20T19:27:01,204 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ac1b2b9d2980406a9fb499777b0be262 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ac1b2b9d2980406a9fb499777b0be262 2024-11-20T19:27:01,204 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ce279d5a4f2c40649c3d92431ad02f97 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/A/ce279d5a4f2c40649c3d92431ad02f97 2024-11-20T19:27:01,206 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1b8441caeabf40e4b69bd19fb5c4e3db to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/1b8441caeabf40e4b69bd19fb5c4e3db 2024-11-20T19:27:01,206 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/588696bd29e6406d91d37807b9d066bf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/588696bd29e6406d91d37807b9d066bf 2024-11-20T19:27:01,207 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/6aac7fdcc68f43b3a57b1c66606acf5b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/B/6aac7fdcc68f43b3a57b1c66606acf5b 2024-11-20T19:27:01,208 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0c8626d2acd748f98389f1df56ccb561 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/0c8626d2acd748f98389f1df56ccb561 2024-11-20T19:27:01,209 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9d3eae136c4d47fea89146a272bf5994 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/9d3eae136c4d47fea89146a272bf5994 2024-11-20T19:27:01,210 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/b1ff17c605e04d06880a9da6e86c2588 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/C/b1ff17c605e04d06880a9da6e86c2588 2024-11-20T19:27:01,212 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/recovered.edits/419.seqid to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa/recovered.edits/419.seqid 2024-11-20T19:27:01,213 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,213 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:27:01,213 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:27:01,213 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T19:27:01,216 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200ade6be11ca54bd7b75dd062032e3a50_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200ade6be11ca54bd7b75dd062032e3a50_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,217 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112025c173ae15bd4b4ebfb38df0a9bb5ed0_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112025c173ae15bd4b4ebfb38df0a9bb5ed0_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,218 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202e47c26a72ae41dda6031c037945fcb7_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202e47c26a72ae41dda6031c037945fcb7_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,219 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112035cf29082dc146e399fc26585cd48f70_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112035cf29082dc146e399fc26585cd48f70_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,219 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203d27420caaa742d39d07f8f51f5b6007_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411203d27420caaa742d39d07f8f51f5b6007_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,220 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120492663a30d0649699fdf13b94d50db1a_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120492663a30d0649699fdf13b94d50db1a_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,221 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204dff0a9927974b3b98d13345e3c24987_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411204dff0a9927974b3b98d13345e3c24987_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,222 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205190add2ff444f14b32fa9798a620726_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205190add2ff444f14b32fa9798a620726_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,223 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205cafd5d058004cec89975138710807c1_fea4de996b388410b192fd1511bd28fa to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205cafd5d058004cec89975138710807c1_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,224 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205d0fe7d967ac4630afd211c00f23a280_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205d0fe7d967ac4630afd211c00f23a280_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,224 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206378675cfc9a425597ecaa01dd6f2a59_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411206378675cfc9a425597ecaa01dd6f2a59_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,225 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120812f59108f76458cad3dd75eac0d421f_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120812f59108f76458cad3dd75eac0d421f_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,226 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120901a44d3f09c42bbbec154786dd77343_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120901a44d3f09c42bbbec154786dd77343_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,227 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209926ad479efe404e84cbc1aa4690f771_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411209926ad479efe404e84cbc1aa4690f771_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,228 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112099f74c0e72c1443fba1d4d93ebb2b994_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112099f74c0e72c1443fba1d4d93ebb2b994_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,229 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b24b9743525240cc8199b8af67496350_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b24b9743525240cc8199b8af67496350_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,230 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ca37001fbeac4aacbecc91f985ada9db_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120ca37001fbeac4aacbecc91f985ada9db_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,230 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d737fd63a0f64f26b9e197b71c2eed5f_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d737fd63a0f64f26b9e197b71c2eed5f_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,231 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d8dd23c32bc04ccab4840ad4ba92ed6b_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d8dd23c32bc04ccab4840ad4ba92ed6b_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,232 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dda8839828ab4ea4ac5b5391f6be06a3_fea4de996b388410b192fd1511bd28fa to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dda8839828ab4ea4ac5b5391f6be06a3_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,232 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e31fded4768e462c8b461329b40a848a_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120e31fded4768e462c8b461329b40a848a_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,233 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb00ed0cce11481aa41b3c1ef4037faa_fea4de996b388410b192fd1511bd28fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120fb00ed0cce11481aa41b3c1ef4037faa_fea4de996b388410b192fd1511bd28fa 2024-11-20T19:27:01,233 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:27:01,235 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,236 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:27:01,238 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:27:01,239 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,239 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:27:01,239 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130821239"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:01,244 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:27:01,244 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fea4de996b388410b192fd1511bd28fa, NAME => 'TestAcidGuarantees,,1732130793780.fea4de996b388410b192fd1511bd28fa.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:27:01,244 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-20T19:27:01,244 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130821244"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:01,245 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:27:01,250 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 54 msec 2024-11-20T19:27:01,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-20T19:27:01,299 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-20T19:27:01,308 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=241 (was 240) - Thread LEAK? -, OpenFileDescriptor=467 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=685 (was 694), ProcessCount=11 (was 11), AvailableMemoryMB=3236 (was 3378) 2024-11-20T19:27:01,316 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241, OpenFileDescriptor=467, MaxFileDescriptor=1048576, SystemLoadAverage=685, ProcessCount=11, AvailableMemoryMB=3236 2024-11-20T19:27:01,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-20T19:27:01,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:27:01,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:01,319 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:27:01,319 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:01,319 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-20T19:27:01,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T19:27:01,320 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:27:01,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742334_1510 (size=963) 2024-11-20T19:27:01,329 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:27:01,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742335_1511 (size=53) 2024-11-20T19:27:01,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T19:27:01,574 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:27:01,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T19:27:01,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:01,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 794552c8bce342231c204cc0e02fbebc, disabling compactions & flushes 2024-11-20T19:27:01,735 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:01,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:01,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. after waiting 0 ms 2024-11-20T19:27:01,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:01,735 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
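The create request and the HRegion instantiation above describe a table whose metadata sets 'hbase.hregion.compacting.memstore.type' to 'ADAPTIVE' and three single-version families A, B and C. As a rough sketch only (the log applies the policy through table-level metadata; requesting it per column family, as below, is a close equivalent, and none of this is the test's actual code), the same schema could be declared through the 2.x Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(ColumnFamilyDescriptorBuilder
                            .newBuilder(Bytes.toBytes(family))
                            .setMaxVersions(1)                                    // VERSIONS => '1'
                            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
                            .build());
                }
                // Triggers a CreateTableProcedure on the master, like pid=127 above.
                admin.createTable(table.build());
            }
        }
    }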
2024-11-20T19:27:01,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:01,736 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:27:01,736 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130821736"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130821736"}]},"ts":"1732130821736"} 2024-11-20T19:27:01,737 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-20T19:27:01,738 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:27:01,738 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130821738"}]},"ts":"1732130821738"} 2024-11-20T19:27:01,739 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:27:01,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, ASSIGN}] 2024-11-20T19:27:01,793 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, ASSIGN 2024-11-20T19:27:01,793 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:27:01,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T19:27:01,944 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=794552c8bce342231c204cc0e02fbebc, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:01,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure 794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:02,095 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,097 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:02,097 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:27:02,098 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,098 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:02,098 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,098 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,099 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,100 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:02,100 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 794552c8bce342231c204cc0e02fbebc columnFamilyName A 2024-11-20T19:27:02,100 DEBUG [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:02,100 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.HStore(327): Store=794552c8bce342231c204cc0e02fbebc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:02,100 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,101 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:02,101 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 794552c8bce342231c204cc0e02fbebc columnFamilyName B 2024-11-20T19:27:02,102 DEBUG [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:02,102 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.HStore(327): Store=794552c8bce342231c204cc0e02fbebc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:02,102 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,103 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:02,103 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 794552c8bce342231c204cc0e02fbebc columnFamilyName C 2024-11-20T19:27:02,103 DEBUG [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:02,104 INFO [StoreOpener-794552c8bce342231c204cc0e02fbebc-1 {}] regionserver.HStore(327): Store=794552c8bce342231c204cc0e02fbebc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:02,104 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:02,105 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,105 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,106 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:27:02,107 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,111 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:27:02,111 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened 794552c8bce342231c204cc0e02fbebc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59342339, jitterRate=-0.11573024094104767}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:27:02,112 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:02,112 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., pid=129, masterSystemTime=1732130822095 2024-11-20T19:27:02,113 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:02,113 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:02,114 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=794552c8bce342231c204cc0e02fbebc, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-20T19:27:02,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure 794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 in 169 msec 2024-11-20T19:27:02,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-20T19:27:02,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, ASSIGN in 325 msec 2024-11-20T19:27:02,117 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:27:02,117 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130822117"}]},"ts":"1732130822117"} 2024-11-20T19:27:02,118 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:27:02,159 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:27:02,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 842 msec 2024-11-20T19:27:02,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-20T19:27:02,423 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-20T19:27:02,424 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x695c2253 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63cefe40 2024-11-20T19:27:02,434 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c12a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,434 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,435 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,436 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:27:02,437 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56344, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:27:02,438 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7177efc9 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65df2359 2024-11-20T19:27:02,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef40578, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,451 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61d38088 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0ab200 2024-11-20T19:27:02,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bb71c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,460 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7043f683 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5871c039 2024-11-20T19:27:02,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc0f7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,468 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b0c2472 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7daa5922 2024-11-20T19:27:02,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8b6e04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,476 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34b30c39 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b7f20c4 2024-11-20T19:27:02,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc486e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,484 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-11-20T19:27:02,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,493 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-11-20T19:27:02,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,501 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-11-20T19:27:02,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x184771cf to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51196534 2024-11-20T19:27:02,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,526 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x076f0408 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc5e114 2024-11-20T19:27:02,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d49886, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:02,545 DEBUG [hconnection-0x39702952-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,546 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:02,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-20T19:27:02,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 
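At 19:27:02,547 the client asks the master to flush TestAcidGuarantees; the master stores it as FlushTableProcedure pid=130 and fans it out to a per-region FlushRegionProcedure (pid=131 below). A hedged sketch of how such a flush is normally requested from the 2.x client (the connection setup here is illustrative, not the test harness wiring):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; the master
                // then schedules a FlushTableProcedure like pid=130 in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }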
2024-11-20T19:27:02,548 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:02,550 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:02,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:02,559 DEBUG [hconnection-0x57d250c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,560 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:02,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:02,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:02,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:02,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:02,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:02,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:02,575 DEBUG [hconnection-0x1dd9b4f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,576 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,577 DEBUG [hconnection-0x7d075359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,578 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130882578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130882579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130882579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,587 DEBUG [hconnection-0x744b029f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,588 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,595 DEBUG [hconnection-0x3f79e134-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,596 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130882597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/34519464bb4945e883d212eceb2aa000 is 50, key is test_row_0/A:col10/1732130822560/Put/seqid=0 2024-11-20T19:27:02,619 DEBUG [hconnection-0x4581ea4b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,620 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,631 DEBUG [hconnection-0x741e27a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,632 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47322, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,636 DEBUG [hconnection-0x30a017b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,638 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47326, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130882639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742336_1512 (size=12001) 2024-11-20T19:27:02,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/34519464bb4945e883d212eceb2aa000 2024-11-20T19:27:02,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:02,651 DEBUG [hconnection-0x1ab2f3ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:02,652 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:02,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130882680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130882680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130882680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,691 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/1fa19907bd1f41f98e88f292a6b907b8 is 50, key is test_row_0/B:col10/1732130822560/Put/seqid=0 2024-11-20T19:27:02,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130882698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T19:27:02,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:02,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:02,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:02,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:02,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:02,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:02,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742337_1513 (size=12001) 2024-11-20T19:27:02,729 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/1fa19907bd1f41f98e88f292a6b907b8 2024-11-20T19:27:02,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130882743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/597c4274dfc1447ebc5bb92c20167757 is 50, key is test_row_0/C:col10/1732130822560/Put/seqid=0 2024-11-20T19:27:02,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742338_1514 (size=12001) 2024-11-20T19:27:02,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/597c4274dfc1447ebc5bb92c20167757 
2024-11-20T19:27:02,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/34519464bb4945e883d212eceb2aa000 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/34519464bb4945e883d212eceb2aa000 2024-11-20T19:27:02,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/34519464bb4945e883d212eceb2aa000, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T19:27:02,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/1fa19907bd1f41f98e88f292a6b907b8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1fa19907bd1f41f98e88f292a6b907b8 2024-11-20T19:27:02,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1fa19907bd1f41f98e88f292a6b907b8, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T19:27:02,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/597c4274dfc1447ebc5bb92c20167757 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/597c4274dfc1447ebc5bb92c20167757 2024-11-20T19:27:02,817 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/597c4274dfc1447ebc5bb92c20167757, entries=150, sequenceid=12, filesize=11.7 K 2024-11-20T19:27:02,818 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 794552c8bce342231c204cc0e02fbebc in 257ms, sequenceid=12, compaction requested=false 2024-11-20T19:27:02,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:02,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:02,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:02,854 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:02,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:02,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1e8013a6b782461a90073fd9c271f0f6 is 50, key is test_row_0/A:col10/1732130822577/Put/seqid=0 2024-11-20T19:27:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:02,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:02,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130882885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130882886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130882887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742339_1515 (size=12001) 2024-11-20T19:27:02,899 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1e8013a6b782461a90073fd9c271f0f6 2024-11-20T19:27:02,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130882901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/cf2ac32a80b54b3f89a8650556d747e2 is 50, key is test_row_0/B:col10/1732130822577/Put/seqid=0 2024-11-20T19:27:02,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742340_1516 (size=12001) 2024-11-20T19:27:02,931 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/cf2ac32a80b54b3f89a8650556d747e2 2024-11-20T19:27:02,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d21d9587ecb74c47b324c1c791820ad4 is 50, key is test_row_0/C:col10/1732130822577/Put/seqid=0 2024-11-20T19:27:02,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130882944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742341_1517 (size=12001) 2024-11-20T19:27:02,964 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d21d9587ecb74c47b324c1c791820ad4 2024-11-20T19:27:02,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1e8013a6b782461a90073fd9c271f0f6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8013a6b782461a90073fd9c271f0f6 2024-11-20T19:27:02,979 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8013a6b782461a90073fd9c271f0f6, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T19:27:02,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/cf2ac32a80b54b3f89a8650556d747e2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cf2ac32a80b54b3f89a8650556d747e2 2024-11-20T19:27:02,985 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cf2ac32a80b54b3f89a8650556d747e2, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T19:27:02,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d21d9587ecb74c47b324c1c791820ad4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d21d9587ecb74c47b324c1c791820ad4 2024-11-20T19:27:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130882988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:02,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130882989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130882988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:02,991 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d21d9587ecb74c47b324c1c791820ad4, entries=150, sequenceid=38, filesize=11.7 K 2024-11-20T19:27:02,991 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 794552c8bce342231c204cc0e02fbebc in 137ms, 
sequenceid=38, compaction requested=false 2024-11-20T19:27:02,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:02,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:02,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-20T19:27:02,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-20T19:27:02,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-20T19:27:02,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 442 msec 2024-11-20T19:27:02,994 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 446 msec 2024-11-20T19:27:03,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-20T19:27:03,150 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-20T19:27:03,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:03,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-20T19:27:03,153 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:03,154 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:03,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:03,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:03,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:03,197 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:03,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:03,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:03,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:03,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:03,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:03,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/105840bf7441421b99003b6049431fe5 is 50, key is test_row_0/A:col10/1732130823193/Put/seqid=0 2024-11-20T19:27:03,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742342_1518 (size=14341) 2024-11-20T19:27:03,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/105840bf7441421b99003b6049431fe5 2024-11-20T19:27:03,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/83f96f71ffcd491a8d64b22a4226ffa0 is 50, key is test_row_0/B:col10/1732130823193/Put/seqid=0 2024-11-20T19:27:03,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742343_1519 (size=12001) 2024-11-20T19:27:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:03,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130883288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130883295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:03,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130883297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130883297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130883297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130883398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130883407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130883409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130883412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130883412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:03,461 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:03,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:03,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130883607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130883617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130883617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,623 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130883619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130883620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/83f96f71ffcd491a8d64b22a4226ffa0 2024-11-20T19:27:03,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/8abd58138aca45dd9f86d841738cfc47 is 50, key is test_row_0/C:col10/1732130823193/Put/seqid=0 2024-11-20T19:27:03,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742344_1520 (size=12001) 2024-11-20T19:27:03,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:03,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:03,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:03,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:03,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130883914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130883920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130883921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,927 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:03,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:03,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:03,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:03,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130883927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:03,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:03,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130883928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/8abd58138aca45dd9f86d841738cfc47 2024-11-20T19:27:04,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/105840bf7441421b99003b6049431fe5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/105840bf7441421b99003b6049431fe5 2024-11-20T19:27:04,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/105840bf7441421b99003b6049431fe5, entries=200, sequenceid=51, filesize=14.0 K 2024-11-20T19:27:04,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/83f96f71ffcd491a8d64b22a4226ffa0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/83f96f71ffcd491a8d64b22a4226ffa0 2024-11-20T19:27:04,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/83f96f71ffcd491a8d64b22a4226ffa0, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:27:04,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/8abd58138aca45dd9f86d841738cfc47 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8abd58138aca45dd9f86d841738cfc47 2024-11-20T19:27:04,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8abd58138aca45dd9f86d841738cfc47, entries=150, sequenceid=51, filesize=11.7 K 2024-11-20T19:27:04,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 794552c8bce342231c204cc0e02fbebc in 881ms, sequenceid=51, compaction requested=true 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:04,077 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:04,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:04,077 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:04,077 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:04,077 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:04,078 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:04,078 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:04,078 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:04,078 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:04,078 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/34519464bb4945e883d212eceb2aa000, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8013a6b782461a90073fd9c271f0f6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/105840bf7441421b99003b6049431fe5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=37.4 K 2024-11-20T19:27:04,078 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1fa19907bd1f41f98e88f292a6b907b8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cf2ac32a80b54b3f89a8650556d747e2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/83f96f71ffcd491a8d64b22a4226ffa0] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.2 K 2024-11-20T19:27:04,078 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fa19907bd1f41f98e88f292a6b907b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732130822555 2024-11-20T19:27:04,078 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34519464bb4945e883d212eceb2aa000, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732130822555 2024-11-20T19:27:04,078 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e8013a6b782461a90073fd9c271f0f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732130822574 2024-11-20T19:27:04,078 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting cf2ac32a80b54b3f89a8650556d747e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732130822574 2024-11-20T19:27:04,079 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 105840bf7441421b99003b6049431fe5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130822886 2024-11-20T19:27:04,079 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 83f96f71ffcd491a8d64b22a4226ffa0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130822886 
2024-11-20T19:27:04,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-20T19:27:04,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:04,080 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:04,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:04,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/df18074a3f9e4f85bf5d084caac04a7c is 50, key is test_row_0/A:col10/1732130823295/Put/seqid=0 2024-11-20T19:27:04,095 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#433 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:04,095 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#434 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:04,101 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/df366801d01f44acae6ad5b0c945e106 is 50, key is test_row_0/A:col10/1732130823193/Put/seqid=0 2024-11-20T19:27:04,101 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/81e8c419af3945f2b59e631157a79cb8 is 50, key is test_row_0/B:col10/1732130823193/Put/seqid=0 2024-11-20T19:27:04,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742345_1521 (size=12001) 2024-11-20T19:27:04,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742346_1522 (size=12104) 2024-11-20T19:27:04,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742347_1523 (size=12104) 2024-11-20T19:27:04,128 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/df366801d01f44acae6ad5b0c945e106 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df366801d01f44acae6ad5b0c945e106 2024-11-20T19:27:04,132 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into df366801d01f44acae6ad5b0c945e106(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
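The PressureAwareThroughputController entries above report each compaction's effective write rate against a 50 MB/second limit and how long the writer slept to stay under it. A rough sketch of that kind of throttling arithmetic (an illustration of the idea only, not HBase's actual controller):

    // Given bytes already written and the elapsed time, return how long the writer
    // should sleep so that bytes/second does not exceed limitBytesPerSec.
    static long throttleSleepMillis(long bytesWritten, long elapsedMillis, double limitBytesPerSec) {
      double minimumMillis = bytesWritten / limitBytesPerSec * 1000.0; // time the write "should" take
      return Math.max(0L, (long) minimumMillis - elapsedMillis);
    }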
2024-11-20T19:27:04,132 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:04,132 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130824077; duration=0sec 2024-11-20T19:27:04,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:04,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:04,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:04,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:04,133 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:04,134 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:04,134 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/597c4274dfc1447ebc5bb92c20167757, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d21d9587ecb74c47b324c1c791820ad4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8abd58138aca45dd9f86d841738cfc47] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.2 K 2024-11-20T19:27:04,134 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 597c4274dfc1447ebc5bb92c20167757, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732130822555 2024-11-20T19:27:04,134 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d21d9587ecb74c47b324c1c791820ad4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732130822574 2024-11-20T19:27:04,135 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8abd58138aca45dd9f86d841738cfc47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130822886 2024-11-20T19:27:04,141 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#435 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:04,142 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/c1fc990b346245c191e052e0faa7def5 is 50, key is test_row_0/C:col10/1732130823193/Put/seqid=0 2024-11-20T19:27:04,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742348_1524 (size=12104) 2024-11-20T19:27:04,152 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/c1fc990b346245c191e052e0faa7def5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c1fc990b346245c191e052e0faa7def5 2024-11-20T19:27:04,157 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into c1fc990b346245c191e052e0faa7def5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:04,157 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:04,157 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130824077; duration=0sec 2024-11-20T19:27:04,157 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:04,157 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:04,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:04,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:04,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:04,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130884429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130884432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130884432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130884432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130884434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,512 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/df18074a3f9e4f85bf5d084caac04a7c 2024-11-20T19:27:04,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/2a16f158a4c343618c5c6244b173e9ac is 50, key is test_row_0/B:col10/1732130823295/Put/seqid=0 2024-11-20T19:27:04,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742349_1525 (size=12001) 2024-11-20T19:27:04,525 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/81e8c419af3945f2b59e631157a79cb8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/81e8c419af3945f2b59e631157a79cb8 2024-11-20T19:27:04,529 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 81e8c419af3945f2b59e631157a79cb8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
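The repeated RegionTooBusyException warnings in this stretch are the region server rejecting writes while the region's memstores are over the blocking limit; the stock HBase client retries these internally, but a hand-rolled writer would need its own backoff. A rough sketch (the backoff values and the bare catch of the exception are illustrative assumptions; depending on client retry settings the exception may instead surface wrapped in a retries-exhausted exception):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Retry a single Put with exponential backoff while the region reports it is too busy.
    static void putWithBackoff(Table table, Put put) throws Exception {
      long sleepMs = 100;                                  // illustrative starting backoff
      for (int attempt = 0; attempt < 8; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException busy) {            // memstore over limit, as logged above
          Thread.sleep(sleepMs);
          sleepMs = Math.min(sleepMs * 2, 5_000);
        }
      }
      throw new RuntimeException("region stayed busy after retries");
    }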
2024-11-20T19:27:04,529 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:04,529 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130824077; duration=0sec 2024-11-20T19:27:04,529 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:04,529 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:04,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130884535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130884535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130884537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130884739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130884739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:04,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130884740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:04,926 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/2a16f158a4c343618c5c6244b173e9ac 2024-11-20T19:27:04,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/2025cb93acf84daa93ab30dd7acfff30 is 50, key is test_row_0/C:col10/1732130823295/Put/seqid=0 2024-11-20T19:27:04,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742350_1526 (size=12001) 2024-11-20T19:27:05,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130885044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130885045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130885045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:05,335 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/2025cb93acf84daa93ab30dd7acfff30 2024-11-20T19:27:05,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/df18074a3f9e4f85bf5d084caac04a7c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df18074a3f9e4f85bf5d084caac04a7c 2024-11-20T19:27:05,341 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df18074a3f9e4f85bf5d084caac04a7c, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T19:27:05,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/2a16f158a4c343618c5c6244b173e9ac as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/2a16f158a4c343618c5c6244b173e9ac 
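The "Over memstore limit=512.0 K" figure is the per-region blocking threshold, i.e. the configured memstore flush size multiplied by the block multiplier; the test presumably runs with a deliberately small flush size so that this back-pressure path is exercised. The two knobs involved, shown with assumed values that would yield the 512 K limit seen here (the actual test configuration is not visible in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        // Blocking limit = flush size * block multiplier; 128 KB * 4 = 512 KB, matching the log.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit bytes = "
            + conf.getLong("hbase.hregion.memstore.flush.size", 0)
              * conf.getInt("hbase.hregion.memstore.block.multiplier", 0));
      }
    }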
2024-11-20T19:27:05,344 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/2a16f158a4c343618c5c6244b173e9ac, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T19:27:05,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/2025cb93acf84daa93ab30dd7acfff30 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2025cb93acf84daa93ab30dd7acfff30 2024-11-20T19:27:05,347 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2025cb93acf84daa93ab30dd7acfff30, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T19:27:05,348 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 794552c8bce342231c204cc0e02fbebc in 1268ms, sequenceid=76, compaction requested=false 2024-11-20T19:27:05,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:05,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
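The flush above lands the same 150 entries in all three column families at sequenceid=76, which is the invariant this test exercises: a multi-family write to a row must become visible atomically. A minimal reader-side check of that property, purely illustrative (row and column names follow the log; the checking logic is an assumption about what such a verifier might do, not the actual test code):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Read one row and verify families A, B and C all return the same value for the
    // checked column, i.e. no reader observes a half-applied multi-family write.
    static void checkRowAtomicity(Table table, String row) throws Exception {
      Result result = table.get(new Get(Bytes.toBytes(row)));
      byte[] expected = null;
      for (String family : new String[] {"A", "B", "C"}) {
        Cell cell = result.getColumnLatestCell(Bytes.toBytes(family), Bytes.toBytes("col10"));
        byte[] value = CellUtil.cloneValue(cell);
        if (expected == null) {
          expected = value;
        } else if (!Bytes.equals(expected, value)) {
          throw new AssertionError("row " + row + " not consistent across families");
        }
      }
    }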
2024-11-20T19:27:05,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-20T19:27:05,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-20T19:27:05,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-20T19:27:05,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1950 sec 2024-11-20T19:27:05,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 2.1980 sec 2024-11-20T19:27:05,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:05,448 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:05,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:05,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:05,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:05,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:05,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:05,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:05,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a8c3fa3504904917bbf8891729fc45d9 is 50, key is test_row_0/A:col10/1732130824431/Put/seqid=0 2024-11-20T19:27:05,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742351_1527 (size=14341) 2024-11-20T19:27:05,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130885499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130885503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130885549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130885552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130885553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130885607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130885612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130885813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130885817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:05,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a8c3fa3504904917bbf8891729fc45d9 2024-11-20T19:27:05,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/4601f80fc5fd4c128b6db6f2e0d0ac5e is 50, key is test_row_0/B:col10/1732130824431/Put/seqid=0 2024-11-20T19:27:05,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742352_1528 (size=12001) 2024-11-20T19:27:06,108 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/34519464bb4945e883d212eceb2aa000, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8013a6b782461a90073fd9c271f0f6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/105840bf7441421b99003b6049431fe5] to archive 2024-11-20T19:27:06,109 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:06,110 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/34519464bb4945e883d212eceb2aa000 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/34519464bb4945e883d212eceb2aa000 2024-11-20T19:27:06,111 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8013a6b782461a90073fd9c271f0f6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8013a6b782461a90073fd9c271f0f6 2024-11-20T19:27:06,112 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/105840bf7441421b99003b6049431fe5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/105840bf7441421b99003b6049431fe5 2024-11-20T19:27:06,112 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1fa19907bd1f41f98e88f292a6b907b8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cf2ac32a80b54b3f89a8650556d747e2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/83f96f71ffcd491a8d64b22a4226ffa0] to archive 2024-11-20T19:27:06,113 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:06,114 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1fa19907bd1f41f98e88f292a6b907b8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1fa19907bd1f41f98e88f292a6b907b8 2024-11-20T19:27:06,116 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cf2ac32a80b54b3f89a8650556d747e2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cf2ac32a80b54b3f89a8650556d747e2 2024-11-20T19:27:06,117 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/83f96f71ffcd491a8d64b22a4226ffa0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/83f96f71ffcd491a8d64b22a4226ffa0 2024-11-20T19:27:06,118 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/597c4274dfc1447ebc5bb92c20167757, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d21d9587ecb74c47b324c1c791820ad4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8abd58138aca45dd9f86d841738cfc47] to archive 2024-11-20T19:27:06,118 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:06,120 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/597c4274dfc1447ebc5bb92c20167757 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/597c4274dfc1447ebc5bb92c20167757 2024-11-20T19:27:06,120 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d21d9587ecb74c47b324c1c791820ad4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d21d9587ecb74c47b324c1c791820ad4 2024-11-20T19:27:06,121 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/db9c3a6c6492:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8abd58138aca45dd9f86d841738cfc47 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8abd58138aca45dd9f86d841738cfc47 2024-11-20T19:27:06,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130886118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130886123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/4601f80fc5fd4c128b6db6f2e0d0ac5e 2024-11-20T19:27:06,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/5e7ea7b3a76e4f079187cdd12bcd07ec is 50, key is test_row_0/C:col10/1732130824431/Put/seqid=0 2024-11-20T19:27:06,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742353_1529 (size=12001) 2024-11-20T19:27:06,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130886554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130886559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130886563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130886626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130886630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:06,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/5e7ea7b3a76e4f079187cdd12bcd07ec 2024-11-20T19:27:06,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a8c3fa3504904917bbf8891729fc45d9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a8c3fa3504904917bbf8891729fc45d9 2024-11-20T19:27:06,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a8c3fa3504904917bbf8891729fc45d9, entries=200, sequenceid=91, filesize=14.0 K 2024-11-20T19:27:06,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/4601f80fc5fd4c128b6db6f2e0d0ac5e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4601f80fc5fd4c128b6db6f2e0d0ac5e 2024-11-20T19:27:06,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4601f80fc5fd4c128b6db6f2e0d0ac5e, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:27:06,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/5e7ea7b3a76e4f079187cdd12bcd07ec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/5e7ea7b3a76e4f079187cdd12bcd07ec 2024-11-20T19:27:06,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/5e7ea7b3a76e4f079187cdd12bcd07ec, entries=150, sequenceid=91, filesize=11.7 K 2024-11-20T19:27:06,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 794552c8bce342231c204cc0e02fbebc in 1253ms, sequenceid=91, compaction requested=true 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:06,702 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:06,702 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:06,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:06,702 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:27:06,702 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 
2024-11-20T19:27:06,702 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:06,703 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:06,703 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:06,703 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:06,703 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:06,703 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df366801d01f44acae6ad5b0c945e106, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df18074a3f9e4f85bf5d084caac04a7c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a8c3fa3504904917bbf8891729fc45d9] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=37.5 K 2024-11-20T19:27:06,703 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/81e8c419af3945f2b59e631157a79cb8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/2a16f158a4c343618c5c6244b173e9ac, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4601f80fc5fd4c128b6db6f2e0d0ac5e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.3 K 2024-11-20T19:27:06,703 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 81e8c419af3945f2b59e631157a79cb8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130822886 2024-11-20T19:27:06,703 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting df366801d01f44acae6ad5b0c945e106, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130822886 2024-11-20T19:27:06,703 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting df18074a3f9e4f85bf5d084caac04a7c, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732130823207 2024-11-20T19:27:06,703 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a16f158a4c343618c5c6244b173e9ac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732130823207 2024-11-20T19:27:06,704 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8c3fa3504904917bbf8891729fc45d9, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130824423 2024-11-20T19:27:06,704 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4601f80fc5fd4c128b6db6f2e0d0ac5e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130824431 2024-11-20T19:27:06,713 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:06,713 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/239d9b9ea1d840a1b8e448f909da68b4 is 50, key is test_row_0/B:col10/1732130824431/Put/seqid=0 2024-11-20T19:27:06,725 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#442 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:06,725 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/ee9e762c86964714ba5c476e0a91454d is 50, key is test_row_0/A:col10/1732130824431/Put/seqid=0 2024-11-20T19:27:06,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742354_1530 (size=12104) 2024-11-20T19:27:06,735 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/239d9b9ea1d840a1b8e448f909da68b4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/239d9b9ea1d840a1b8e448f909da68b4 2024-11-20T19:27:06,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742355_1531 (size=12104) 2024-11-20T19:27:06,740 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/ee9e762c86964714ba5c476e0a91454d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ee9e762c86964714ba5c476e0a91454d 2024-11-20T19:27:06,740 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 239d9b9ea1d840a1b8e448f909da68b4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:06,740 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:06,740 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130826702; duration=0sec 2024-11-20T19:27:06,740 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:06,740 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:06,740 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:06,741 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:06,741 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:06,741 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:06,741 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c1fc990b346245c191e052e0faa7def5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2025cb93acf84daa93ab30dd7acfff30, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/5e7ea7b3a76e4f079187cdd12bcd07ec] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.3 K 2024-11-20T19:27:06,743 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c1fc990b346245c191e052e0faa7def5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732130822886 2024-11-20T19:27:06,743 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 2025cb93acf84daa93ab30dd7acfff30, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732130823207 2024-11-20T19:27:06,744 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e7ea7b3a76e4f079187cdd12bcd07ec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130824431 2024-11-20T19:27:06,744 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) 
file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into ee9e762c86964714ba5c476e0a91454d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:06,744 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:06,744 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130826702; duration=0sec 2024-11-20T19:27:06,744 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:06,744 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:06,749 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#443 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:06,750 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e8c8ee28ba48450a8c91ce3e2015bda0 is 50, key is test_row_0/C:col10/1732130824431/Put/seqid=0 2024-11-20T19:27:06,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742356_1532 (size=12104) 2024-11-20T19:27:07,157 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e8c8ee28ba48450a8c91ce3e2015bda0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e8c8ee28ba48450a8c91ce3e2015bda0 2024-11-20T19:27:07,161 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into e8c8ee28ba48450a8c91ce3e2015bda0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:07,161 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:07,161 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130826702; duration=0sec 2024-11-20T19:27:07,161 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:07,161 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:07,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-20T19:27:07,259 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-20T19:27:07,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:07,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-20T19:27:07,261 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:07,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:07,261 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:07,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:07,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:07,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:07,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-20T19:27:07,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:07,413 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:27:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:07,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:07,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/01a257cfaae74dbabac90bdfbe4d3c43 is 50, key is test_row_0/A:col10/1732130825494/Put/seqid=0 2024-11-20T19:27:07,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742357_1533 (size=12001) 2024-11-20T19:27:07,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:07,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:07,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:07,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:07,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130887652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:07,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:07,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130887654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:07,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130887757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:07,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:07,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130887759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:07,821 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/01a257cfaae74dbabac90bdfbe4d3c43 2024-11-20T19:27:07,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa is 50, key is test_row_0/B:col10/1732130825494/Put/seqid=0 2024-11-20T19:27:07,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742358_1534 (size=12001) 2024-11-20T19:27:07,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:07,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130887962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:07,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:07,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130887964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,230 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa 2024-11-20T19:27:08,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6353b1a270324fa0bdd7a5e63b949b49 is 50, key is test_row_0/C:col10/1732130825494/Put/seqid=0 2024-11-20T19:27:08,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742359_1535 (size=12001) 2024-11-20T19:27:08,238 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6353b1a270324fa0bdd7a5e63b949b49 2024-11-20T19:27:08,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/01a257cfaae74dbabac90bdfbe4d3c43 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/01a257cfaae74dbabac90bdfbe4d3c43 2024-11-20T19:27:08,243 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/01a257cfaae74dbabac90bdfbe4d3c43, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T19:27:08,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa 2024-11-20T19:27:08,245 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T19:27:08,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6353b1a270324fa0bdd7a5e63b949b49 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6353b1a270324fa0bdd7a5e63b949b49 2024-11-20T19:27:08,248 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6353b1a270324fa0bdd7a5e63b949b49, entries=150, sequenceid=118, filesize=11.7 K 2024-11-20T19:27:08,249 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 794552c8bce342231c204cc0e02fbebc in 836ms, sequenceid=118, compaction requested=false 2024-11-20T19:27:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
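The repeated RegionTooBusyException entries above and below ("Over memstore limit=512.0 K") are thrown from HRegion.checkResources when a region's memstore grows past its blocking size; writes to the region are rejected until a flush, like the one that just completed for stores A, B and C, brings the memstore back under the limit. The blocking size is the product of two settings, hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. The exact values configured for this TestAcidGuarantees run are not shown in the log, so the figures in the following sketch are illustrative only (128 KB x 4 = 512 KB, which would match the 512.0 K limit reported here); it assumes nothing beyond the standard Hadoop/HBase Configuration API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        // Illustrative values only; the actual settings used by this test run are not shown in the log.
        Configuration conf = HBaseConfiguration.create();

        // Size at which a region's memstore is flushed to disk (defaults to 128 MB in production).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Writes are blocked with RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier (here: 128 KB * 4 = 512 KB, matching the
        // "Over memstore limit=512.0 K" messages in this log).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
    }
}

In production the flush size defaults to 128 MB, so a 512 K blocking limit is characteristic of a test configuration with a deliberately tiny memstore that forces frequent flushes and write rejections.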
2024-11-20T19:27:08,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-20T19:27:08,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-20T19:27:08,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-20T19:27:08,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 989 msec 2024-11-20T19:27:08,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 992 msec 2024-11-20T19:27:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:08,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:08,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:08,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:08,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:08,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:08,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:08,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:08,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/5357bf0f69ce4bb087aaf1c26d8880a5 is 50, key is test_row_0/A:col10/1732130827646/Put/seqid=0 2024-11-20T19:27:08,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742360_1536 (size=16831) 2024-11-20T19:27:08,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130888325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130888328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-20T19:27:08,363 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-20T19:27:08,364 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:08,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-20T19:27:08,365 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:08,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:08,366 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:08,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:08,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130888429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130888434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:08,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:08,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:08,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,517 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
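The ERROR above records the flush procedure pid=137 failing with "Unable to complete flush" because the region reported that it was already flushing; the DEBUG entries that follow show that failure being reported back to the master. On the client side, the "Over memstore limit" rejections arrive as RegionTooBusyException and are retried by RpcRetryingCallerImpl inside HTable.put (the "Call exception, tries=6, retries=16" entries further down). A minimal sketch of a client put that tolerates such transient rejections with its own bounded retry loop follows; it assumes the standard HBase 2.x client API and is purely illustrative, not what AcidGuaranteesTestTool itself does (the tool relies on the built-in retrying caller).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family and qualifier mirror the ones visible in this log (test_row_0, A, col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            // Retry a bounded number of times when the region rejects writes because
            // its memstore is over the blocking limit, giving flushes time to catch up.
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(100L * attempt); // simple linear backoff
                }
            }
        }
    }
}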
2024-11-20T19:27:08,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130888577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130888578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130888578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,583 DEBUG [Thread-2259 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4151 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:08,583 DEBUG [Thread-2253 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:08,584 DEBUG [Thread-2255 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., hostname=db9c3a6c6492,35979,1732130703276, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-20T19:27:08,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130888634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130888639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:08,669 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:08,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:08,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
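[editor's note] The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking threshold, which in HBase is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The log only reports the resulting 512 K limit; one combination that produces it is a 128 KB flush size with the default multiplier of 4, which is an assumption here, not something the log states. A minimal sketch of that arithmetic using the real configuration keys:

    // Illustrative sketch only: shows one way the 512.0 K blocking limit seen in the
    // log could arise. The 128 KB flush size and multiplier of 4 are assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (test-sized value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore grows past flushSize * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block above " + blockingLimit + " bytes"); // 524288 = 512.0 K
      }
    }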
2024-11-20T19:27:08,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/5357bf0f69ce4bb087aaf1c26d8880a5 2024-11-20T19:27:08,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/267eee7b1e2b4153a37d9a23227be2e0 is 50, key is test_row_0/B:col10/1732130827646/Put/seqid=0 2024-11-20T19:27:08,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742361_1537 (size=12051) 2024-11-20T19:27:08,821 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:08,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:08,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:08,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130888936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:08,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130888940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:08,973 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:08,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:08,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:08,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:08,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:08,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:08,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,098 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/267eee7b1e2b4153a37d9a23227be2e0 2024-11-20T19:27:09,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6638b4ffc7bd470f81f4793edb989972 is 50, key is test_row_0/C:col10/1732130827646/Put/seqid=0 2024-11-20T19:27:09,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742362_1538 (size=12051) 2024-11-20T19:27:09,125 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:09,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,278 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:09,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:09,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:09,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,430 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:09,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:09,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:09,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:09,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
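[editor's note] The pid=137 entries above form a retry loop: the master keeps re-dispatching FlushRegionCallable, the region server answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master retries until the in-flight flush started by MemStoreFlusher.0 completes. A minimal sketch, assuming an ordinary client environment, of requesting the same kind of flush through the public Admin API; the table name is taken from the log, the connection setup is a placeholder:

    // Illustrative sketch only: drives a table flush like the procedure behind pid=137.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // If a region of the table is already flushing, the server-side callable
          // fails (as in the log) and the master re-dispatches the procedure.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }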
2024-11-20T19:27:09,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:09,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:09,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130889441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:09,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:09,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130889446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:09,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:09,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6638b4ffc7bd470f81f4793edb989972 2024-11-20T19:27:09,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/5357bf0f69ce4bb087aaf1c26d8880a5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5357bf0f69ce4bb087aaf1c26d8880a5 2024-11-20T19:27:09,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5357bf0f69ce4bb087aaf1c26d8880a5, entries=250, sequenceid=132, filesize=16.4 K 2024-11-20T19:27:09,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/267eee7b1e2b4153a37d9a23227be2e0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/267eee7b1e2b4153a37d9a23227be2e0 2024-11-20T19:27:09,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/267eee7b1e2b4153a37d9a23227be2e0, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T19:27:09,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6638b4ffc7bd470f81f4793edb989972 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6638b4ffc7bd470f81f4793edb989972 2024-11-20T19:27:09,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6638b4ffc7bd470f81f4793edb989972, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T19:27:09,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 794552c8bce342231c204cc0e02fbebc in 1250ms, sequenceid=132, compaction requested=true 2024-11-20T19:27:09,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:09,520 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:09,520 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:09,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:09,520 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40936 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:09,520 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36156 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:09,520 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:09,520 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:09,520 INFO 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,520 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,520 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/239d9b9ea1d840a1b8e448f909da68b4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/267eee7b1e2b4153a37d9a23227be2e0] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.3 K 2024-11-20T19:27:09,520 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ee9e762c86964714ba5c476e0a91454d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/01a257cfaae74dbabac90bdfbe4d3c43, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5357bf0f69ce4bb087aaf1c26d8880a5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=40.0 K 2024-11-20T19:27:09,521 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee9e762c86964714ba5c476e0a91454d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130824431 2024-11-20T19:27:09,521 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 239d9b9ea1d840a1b8e448f909da68b4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130824431 2024-11-20T19:27:09,521 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01a257cfaae74dbabac90bdfbe4d3c43, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732130825494 2024-11-20T19:27:09,521 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting cfe9ffbb9e6f4fb3abe0afbbf1b47faa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732130825494 2024-11-20T19:27:09,521 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5357bf0f69ce4bb087aaf1c26d8880a5, keycount=250, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130827646 2024-11-20T19:27:09,521 DEBUG 
[RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 267eee7b1e2b4153a37d9a23227be2e0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130827646 2024-11-20T19:27:09,526 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:09,526 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#451 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:09,526 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/580504fe54ba43a7af3e0681f7989e46 is 50, key is test_row_0/A:col10/1732130827646/Put/seqid=0 2024-11-20T19:27:09,527 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/3d0e45935f1b40839f4bd95ccb2c5334 is 50, key is test_row_0/B:col10/1732130827646/Put/seqid=0 2024-11-20T19:27:09,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742363_1539 (size=12257) 2024-11-20T19:27:09,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742364_1540 (size=12257) 2024-11-20T19:27:09,533 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/3d0e45935f1b40839f4bd95ccb2c5334 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/3d0e45935f1b40839f4bd95ccb2c5334 2024-11-20T19:27:09,536 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 3d0e45935f1b40839f4bd95ccb2c5334(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:09,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:09,536 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130829520; duration=0sec 2024-11-20T19:27:09,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:09,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:09,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:09,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36156 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:09,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:09,537 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,537 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e8c8ee28ba48450a8c91ce3e2015bda0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6353b1a270324fa0bdd7a5e63b949b49, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6638b4ffc7bd470f81f4793edb989972] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.3 K 2024-11-20T19:27:09,537 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e8c8ee28ba48450a8c91ce3e2015bda0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732130824431 2024-11-20T19:27:09,537 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6353b1a270324fa0bdd7a5e63b949b49, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732130825494 2024-11-20T19:27:09,537 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6638b4ffc7bd470f81f4793edb989972, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130827646 2024-11-20T19:27:09,542 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
794552c8bce342231c204cc0e02fbebc#C#compaction#452 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:09,543 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/64b7cec41848416e998f9be5e5c9c875 is 50, key is test_row_0/C:col10/1732130827646/Put/seqid=0 2024-11-20T19:27:09,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742365_1541 (size=12257) 2024-11-20T19:27:09,582 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:09,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-20T19:27:09,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:09,583 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:27:09,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:09,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:09,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/5fe710095d3e43658659f8f1fd3fd043 is 50, key is test_row_0/A:col10/1732130828314/Put/seqid=0 2024-11-20T19:27:09,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742366_1542 (size=12151) 2024-11-20T19:27:09,933 DEBUG 
[RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/580504fe54ba43a7af3e0681f7989e46 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/580504fe54ba43a7af3e0681f7989e46 2024-11-20T19:27:09,937 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 580504fe54ba43a7af3e0681f7989e46(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:09,937 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:09,937 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130829519; duration=0sec 2024-11-20T19:27:09,937 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:09,937 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:09,951 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/64b7cec41848416e998f9be5e5c9c875 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/64b7cec41848416e998f9be5e5c9c875 2024-11-20T19:27:09,954 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into 64b7cec41848416e998f9be5e5c9c875(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:09,954 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:09,954 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130829520; duration=0sec 2024-11-20T19:27:09,955 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:09,955 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:09,992 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/5fe710095d3e43658659f8f1fd3fd043 2024-11-20T19:27:09,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/aa373d64900f4ed781348f060ec0848c is 50, key is test_row_0/B:col10/1732130828314/Put/seqid=0 2024-11-20T19:27:10,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742367_1543 (size=12151) 2024-11-20T19:27:10,400 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/aa373d64900f4ed781348f060ec0848c 2024-11-20T19:27:10,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/438a5135f04e43818b405c51ceec835a is 50, key is test_row_0/C:col10/1732130828314/Put/seqid=0 2024-11-20T19:27:10,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742368_1544 (size=12151) 2024-11-20T19:27:10,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
as already flushing 2024-11-20T19:27:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:10,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:10,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:10,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130890478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:10,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:10,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130890479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:10,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:10,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130890583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:10,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130890587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:10,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:10,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130890786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:10,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130890791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:10,829 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/438a5135f04e43818b405c51ceec835a 2024-11-20T19:27:10,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/5fe710095d3e43658659f8f1fd3fd043 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5fe710095d3e43658659f8f1fd3fd043 2024-11-20T19:27:10,834 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5fe710095d3e43658659f8f1fd3fd043, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T19:27:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/aa373d64900f4ed781348f060ec0848c as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/aa373d64900f4ed781348f060ec0848c 2024-11-20T19:27:10,837 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/aa373d64900f4ed781348f060ec0848c, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T19:27:10,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/438a5135f04e43818b405c51ceec835a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/438a5135f04e43818b405c51ceec835a 2024-11-20T19:27:10,840 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/438a5135f04e43818b405c51ceec835a, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T19:27:10,841 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 794552c8bce342231c204cc0e02fbebc in 1258ms, sequenceid=155, compaction requested=false 2024-11-20T19:27:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:10,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-20T19:27:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-20T19:27:10,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-20T19:27:10,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4760 sec 2024-11-20T19:27:10,844 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.4790 sec 2024-11-20T19:27:11,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:11,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:27:11,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:11,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:11,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:11,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/55d2dc7e77c74831b47790132f5b185e is 50, key is test_row_0/A:col10/1732130831092/Put/seqid=0 2024-11-20T19:27:11,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742369_1545 (size=14541) 2024-11-20T19:27:11,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130891136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130891139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130891242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130891242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130891447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130891448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,500 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/55d2dc7e77c74831b47790132f5b185e 2024-11-20T19:27:11,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/57adee12f19e472f8d33f3da4694654f is 50, key is test_row_0/B:col10/1732130831092/Put/seqid=0 2024-11-20T19:27:11,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742370_1546 (size=12151) 2024-11-20T19:27:11,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/57adee12f19e472f8d33f3da4694654f 2024-11-20T19:27:11,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/db5be57a0b6047109bb423bc8e01ff89 is 50, key is test_row_0/C:col10/1732130831092/Put/seqid=0 2024-11-20T19:27:11,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742371_1547 (size=12151) 2024-11-20T19:27:11,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/db5be57a0b6047109bb423bc8e01ff89 2024-11-20T19:27:11,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/55d2dc7e77c74831b47790132f5b185e as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/55d2dc7e77c74831b47790132f5b185e 2024-11-20T19:27:11,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/55d2dc7e77c74831b47790132f5b185e, entries=200, sequenceid=172, filesize=14.2 K 2024-11-20T19:27:11,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/57adee12f19e472f8d33f3da4694654f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/57adee12f19e472f8d33f3da4694654f 2024-11-20T19:27:11,554 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/57adee12f19e472f8d33f3da4694654f, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T19:27:11,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/db5be57a0b6047109bb423bc8e01ff89 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/db5be57a0b6047109bb423bc8e01ff89 2024-11-20T19:27:11,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/db5be57a0b6047109bb423bc8e01ff89, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T19:27:11,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 794552c8bce342231c204cc0e02fbebc in 468ms, sequenceid=172, compaction requested=true 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:11,560 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:11,560 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:11,561 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38949 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:11,561 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:11,561 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:11,561 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/580504fe54ba43a7af3e0681f7989e46, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5fe710095d3e43658659f8f1fd3fd043, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/55d2dc7e77c74831b47790132f5b185e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=38.0 K 2024-11-20T19:27:11,562 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36559 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:11,562 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:11,562 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:11,563 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/3d0e45935f1b40839f4bd95ccb2c5334, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/aa373d64900f4ed781348f060ec0848c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/57adee12f19e472f8d33f3da4694654f] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.7 K 2024-11-20T19:27:11,563 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 580504fe54ba43a7af3e0681f7989e46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130827646 2024-11-20T19:27:11,563 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d0e45935f1b40839f4bd95ccb2c5334, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130827646 2024-11-20T19:27:11,563 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting aa373d64900f4ed781348f060ec0848c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732130828314 2024-11-20T19:27:11,564 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fe710095d3e43658659f8f1fd3fd043, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732130828314 2024-11-20T19:27:11,564 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 57adee12f19e472f8d33f3da4694654f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130830460 2024-11-20T19:27:11,564 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55d2dc7e77c74831b47790132f5b185e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130830460 2024-11-20T19:27:11,572 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:11,573 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/76c1acbf43b745309c33d1967ac3a97a is 50, key is test_row_0/B:col10/1732130831092/Put/seqid=0 2024-11-20T19:27:11,582 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:11,583 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1f37c8ae522445df98c7fa037ea7d169 is 50, key is test_row_0/A:col10/1732130831092/Put/seqid=0 2024-11-20T19:27:11,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742372_1548 (size=12459) 2024-11-20T19:27:11,611 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/76c1acbf43b745309c33d1967ac3a97a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/76c1acbf43b745309c33d1967ac3a97a 2024-11-20T19:27:11,617 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 76c1acbf43b745309c33d1967ac3a97a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:11,617 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:11,617 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130831560; duration=0sec 2024-11-20T19:27:11,617 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:11,617 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:11,617 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:11,618 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36559 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:11,618 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:11,618 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:11,618 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/64b7cec41848416e998f9be5e5c9c875, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/438a5135f04e43818b405c51ceec835a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/db5be57a0b6047109bb423bc8e01ff89] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.7 K 2024-11-20T19:27:11,619 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 64b7cec41848416e998f9be5e5c9c875, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130827646 2024-11-20T19:27:11,619 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 438a5135f04e43818b405c51ceec835a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732130828314 2024-11-20T19:27:11,619 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting db5be57a0b6047109bb423bc8e01ff89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130830460 2024-11-20T19:27:11,637 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:11,637 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/c09f9358568544dca65fe23a7fe0cc49 is 50, key is test_row_0/C:col10/1732130831092/Put/seqid=0 2024-11-20T19:27:11,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742373_1549 (size=12459) 2024-11-20T19:27:11,643 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1f37c8ae522445df98c7fa037ea7d169 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1f37c8ae522445df98c7fa037ea7d169 2024-11-20T19:27:11,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742374_1550 (size=12459) 2024-11-20T19:27:11,647 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/c09f9358568544dca65fe23a7fe0cc49 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c09f9358568544dca65fe23a7fe0cc49 2024-11-20T19:27:11,649 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 1f37c8ae522445df98c7fa037ea7d169(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:11,649 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:11,649 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130831560; duration=0sec 2024-11-20T19:27:11,650 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:11,650 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:11,656 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into c09f9358568544dca65fe23a7fe0cc49(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:11,656 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:11,656 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130831560; duration=0sec 2024-11-20T19:27:11,656 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:11,656 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:11,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:27:11,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:11,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:11,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:11,755 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:11,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/7c76f98d2a1f4416b75a868a02886309 is 50, key is test_row_0/A:col10/1732130831753/Put/seqid=0 2024-11-20T19:27:11,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130891781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130891785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742375_1551 (size=14541) 2024-11-20T19:27:11,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/7c76f98d2a1f4416b75a868a02886309 2024-11-20T19:27:11,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6f65b0435ec446c49a04b7483b548465 is 50, key is test_row_0/B:col10/1732130831753/Put/seqid=0 2024-11-20T19:27:11,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742376_1552 (size=12151) 2024-11-20T19:27:11,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6f65b0435ec446c49a04b7483b548465 2024-11-20T19:27:11,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e07990235b8a49db89d68dd81bfd5335 is 50, key is test_row_0/C:col10/1732130831753/Put/seqid=0 2024-11-20T19:27:11,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742377_1553 (size=12151) 2024-11-20T19:27:11,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e07990235b8a49db89d68dd81bfd5335 2024-11-20T19:27:11,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/7c76f98d2a1f4416b75a868a02886309 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7c76f98d2a1f4416b75a868a02886309 2024-11-20T19:27:11,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7c76f98d2a1f4416b75a868a02886309, entries=200, sequenceid=197, filesize=14.2 K 2024-11-20T19:27:11,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6f65b0435ec446c49a04b7483b548465 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6f65b0435ec446c49a04b7483b548465 2024-11-20T19:27:11,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6f65b0435ec446c49a04b7483b548465, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T19:27:11,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e07990235b8a49db89d68dd81bfd5335 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e07990235b8a49db89d68dd81bfd5335 2024-11-20T19:27:11,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e07990235b8a49db89d68dd81bfd5335, entries=150, sequenceid=197, filesize=11.9 K 2024-11-20T19:27:11,883 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 794552c8bce342231c204cc0e02fbebc in 128ms, sequenceid=197, compaction requested=false 2024-11-20T19:27:11,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:11,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:11,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:27:11,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:11,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:11,897 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:11,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:11,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4ebf316359dd4197b1893e1fd74c431f is 50, key is test_row_0/A:col10/1732130831896/Put/seqid=0 2024-11-20T19:27:11,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742378_1554 (size=16931) 2024-11-20T19:27:11,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130891950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:11,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:11,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130891951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130892059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130892059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130892265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,272 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130892266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4ebf316359dd4197b1893e1fd74c431f 2024-11-20T19:27:12,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/f9a9a92954214558b73eba8bd271be1b is 50, key is test_row_0/B:col10/1732130831896/Put/seqid=0 2024-11-20T19:27:12,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742379_1555 (size=12151) 2024-11-20T19:27:12,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/f9a9a92954214558b73eba8bd271be1b 2024-11-20T19:27:12,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e5bccfd9b1be4cfc803ab8acda2caa5b is 50, key is test_row_0/C:col10/1732130831896/Put/seqid=0 2024-11-20T19:27:12,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742380_1556 (size=12151) 2024-11-20T19:27:12,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e5bccfd9b1be4cfc803ab8acda2caa5b 2024-11-20T19:27:12,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4ebf316359dd4197b1893e1fd74c431f as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ebf316359dd4197b1893e1fd74c431f 2024-11-20T19:27:12,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ebf316359dd4197b1893e1fd74c431f, entries=250, sequenceid=213, filesize=16.5 K 2024-11-20T19:27:12,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/f9a9a92954214558b73eba8bd271be1b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/f9a9a92954214558b73eba8bd271be1b 2024-11-20T19:27:12,465 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/f9a9a92954214558b73eba8bd271be1b, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T19:27:12,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/e5bccfd9b1be4cfc803ab8acda2caa5b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e5bccfd9b1be4cfc803ab8acda2caa5b 2024-11-20T19:27:12,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-20T19:27:12,470 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-20T19:27:12,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e5bccfd9b1be4cfc803ab8acda2caa5b, entries=150, sequenceid=213, filesize=11.9 K 2024-11-20T19:27:12,471 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:12,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-20T19:27:12,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:12,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 794552c8bce342231c204cc0e02fbebc in 577ms, sequenceid=213, compaction requested=true 2024-11-20T19:27:12,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:12,474 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:12,475 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:12,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:12,475 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:12,476 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:12,476 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:12,477 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:12,477 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:12,477 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:12,477 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:12,477 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:12,478 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,478 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/76c1acbf43b745309c33d1967ac3a97a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6f65b0435ec446c49a04b7483b548465, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/f9a9a92954214558b73eba8bd271be1b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.9 K 2024-11-20T19:27:12,478 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1f37c8ae522445df98c7fa037ea7d169, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7c76f98d2a1f4416b75a868a02886309, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ebf316359dd4197b1893e1fd74c431f] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=42.9 K 2024-11-20T19:27:12,478 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 76c1acbf43b745309c33d1967ac3a97a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130830460 2024-11-20T19:27:12,478 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f37c8ae522445df98c7fa037ea7d169, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130830460 2024-11-20T19:27:12,478 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f65b0435ec446c49a04b7483b548465, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732130831132 2024-11-20T19:27:12,478 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c76f98d2a1f4416b75a868a02886309, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732130831132 2024-11-20T19:27:12,479 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ebf316359dd4197b1893e1fd74c431f, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732130831772 2024-11-20T19:27:12,479 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f9a9a92954214558b73eba8bd271be1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732130831772 
2024-11-20T19:27:12,506 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:12,510 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/8f2f1adaf5004771a4791df127b96aa2 is 50, key is test_row_0/A:col10/1732130831896/Put/seqid=0 2024-11-20T19:27:12,523 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#469 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:12,523 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/b8b9a049924a4ee98cae88644ba0f5fe is 50, key is test_row_0/B:col10/1732130831896/Put/seqid=0 2024-11-20T19:27:12,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742381_1557 (size=12561) 2024-11-20T19:27:12,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:12,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-20T19:27:12,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:12,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:12,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:12,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:12,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742382_1558 (size=12561) 2024-11-20T19:27:12,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:12,588 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/b8b9a049924a4ee98cae88644ba0f5fe as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/b8b9a049924a4ee98cae88644ba0f5fe 2024-11-20T19:27:12,595 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into b8b9a049924a4ee98cae88644ba0f5fe(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:12,595 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:12,595 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130832476; duration=0sec 2024-11-20T19:27:12,595 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:12,595 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:12,595 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:12,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4a36aeb844fd4aa68e62b7fb9a70ac3d is 50, key is test_row_0/A:col10/1732130831943/Put/seqid=0 2024-11-20T19:27:12,596 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:12,596 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:12,596 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:12,596 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c09f9358568544dca65fe23a7fe0cc49, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e07990235b8a49db89d68dd81bfd5335, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e5bccfd9b1be4cfc803ab8acda2caa5b] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=35.9 K 2024-11-20T19:27:12,597 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c09f9358568544dca65fe23a7fe0cc49, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130830460 2024-11-20T19:27:12,597 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e07990235b8a49db89d68dd81bfd5335, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732130831132 2024-11-20T19:27:12,598 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e5bccfd9b1be4cfc803ab8acda2caa5b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732130831772 2024-11-20T19:27:12,627 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:12,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:12,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130892635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130892637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130892638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130892635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130892639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742383_1559 (size=14541) 2024-11-20T19:27:12,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4a36aeb844fd4aa68e62b7fb9a70ac3d 2024-11-20T19:27:12,670 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#471 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:12,670 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/98a1e68d561544699636ce39017d8fa4 is 50, key is test_row_0/C:col10/1732130831896/Put/seqid=0 2024-11-20T19:27:12,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/dbe493a6f1a94ceabb0bd31fc2845d80 is 50, key is test_row_0/B:col10/1732130831943/Put/seqid=0 2024-11-20T19:27:12,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742384_1560 (size=12561) 2024-11-20T19:27:12,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742385_1561 (size=12151) 2024-11-20T19:27:12,732 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/98a1e68d561544699636ce39017d8fa4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/98a1e68d561544699636ce39017d8fa4 2024-11-20T19:27:12,738 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into 98a1e68d561544699636ce39017d8fa4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:12,738 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:12,738 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130832476; duration=0sec 2024-11-20T19:27:12,739 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:12,739 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:12,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130892752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130892752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130892752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130892752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130892753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:12,780 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:12,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:12,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:12,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,933 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:12,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:12,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:12,966 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/8f2f1adaf5004771a4791df127b96aa2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8f2f1adaf5004771a4791df127b96aa2 2024-11-20T19:27:12,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130892959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130892960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130892961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130892962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:12,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130892962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:12,972 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 8f2f1adaf5004771a4791df127b96aa2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:12,972 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:12,972 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130832475; duration=0sec 2024-11-20T19:27:12,972 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:12,972 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:13,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:13,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:13,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:13,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:13,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:13,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/dbe493a6f1a94ceabb0bd31fc2845d80 2024-11-20T19:27:13,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/23cd5a1809ac4c559055ac746145d3ce is 50, key is test_row_0/C:col10/1732130831943/Put/seqid=0 2024-11-20T19:27:13,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742386_1562 (size=12151) 2024-11-20T19:27:13,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/23cd5a1809ac4c559055ac746145d3ce 2024-11-20T19:27:13,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4a36aeb844fd4aa68e62b7fb9a70ac3d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4a36aeb844fd4aa68e62b7fb9a70ac3d 2024-11-20T19:27:13,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4a36aeb844fd4aa68e62b7fb9a70ac3d, entries=200, sequenceid=234, filesize=14.2 K 2024-11-20T19:27:13,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/dbe493a6f1a94ceabb0bd31fc2845d80 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/dbe493a6f1a94ceabb0bd31fc2845d80 2024-11-20T19:27:13,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/dbe493a6f1a94ceabb0bd31fc2845d80, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T19:27:13,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/23cd5a1809ac4c559055ac746145d3ce as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/23cd5a1809ac4c559055ac746145d3ce 2024-11-20T19:27:13,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/23cd5a1809ac4c559055ac746145d3ce, entries=150, sequenceid=234, filesize=11.9 K 2024-11-20T19:27:13,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 794552c8bce342231c204cc0e02fbebc in 606ms, sequenceid=234, compaction requested=false 2024-11-20T19:27:13,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:13,240 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-20T19:27:13,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
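The entries above show FlushRegionProcedure pid=139 failing once with "Unable to complete flush", the MemStoreFlusher committing store files for families A, B and C at sequenceid=234, and the master re-dispatching pid=139; the entries below show the retried flush succeeding at sequenceid=252 while concurrent writers are rejected with RegionTooBusyException once the region's 512.0 K memstore blocking limit is reached. A minimal client-side sketch of the same interaction follows, assuming a plain HBase 2.x Java client: a Put spanning families A/B/C like the test_row_0/col10 writes in the log, an application-level retry with backoff when RegionTooBusyException surfaces, and Admin.flush(), the client-visible way to trigger the FlushTableProcedure/FlushRegionProcedure pair seen here. The class name, helper name and retry parameters are illustrative, not taken from the actual TestAcidGuarantees code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import java.io.IOException;

public class FlushAndRetrySketch {
  private static final TableName TABLE = TableName.valueOf("TestAcidGuarantees");
  private static final byte[][] FAMILIES = {
      Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") };

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TABLE);
         Admin admin = conn.getAdmin()) {

      // One Put spanning all three families: the row mutation is applied
      // atomically, which is what the acid-guarantees workload relies on.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("value");
      for (byte[] family : FAMILIES) {
        put.addColumn(family, Bytes.toBytes("col10"), value);
      }
      putWithRetry(table, put, 5, 200L);

      // Client-side counterpart of the FlushTableProcedure in the log:
      // ask the master to flush every region of the table.
      admin.flush(TABLE);
    }
  }

  /**
   * Retry a Put with simple exponential backoff when the server rejects it
   * with RegionTooBusyException (memstore above its blocking limit).
   * Note: depending on hbase.client.retries.number, the client library may
   * retry internally first and surface the failure wrapped in a
   * RetriesExhaustedException instead; this sketch handles the direct case.
   */
  static void putWithRetry(Table table, Put put, int maxAttempts, long initialPauseMs)
      throws IOException, InterruptedException {
    long pause = initialPauseMs;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(pause);  // back off and let the in-flight flush drain the memstore
        pause *= 2;
      }
    }
  }
}

Backing off on RegionTooBusyException rather than failing lets the in-flight flush (pid=139 above) drain the memstore, after which the same writes go through; the log shows exactly that once "Finished flush ... sequenceid=252" appears below. The 512.0 K limit in the warnings is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the individual settings used by this test are not visible in the log, so the values below are only one plausible combination, shown to illustrate how such a small blocking size would be configured:

// Assumed settings: only their product (512 K) is confirmed by the log.
Configuration conf = HBaseConfiguration.create();
conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K per-region flush trigger
conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4 x flush size = 512 K
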
2024-11-20T19:27:13,241 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:27:13,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:13,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:13,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:13,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/fd798b8893384baea82b19ff7a475fb8 is 50, key is test_row_0/A:col10/1732130832638/Put/seqid=0 2024-11-20T19:27:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:13,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:13,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742387_1563 (size=12151) 2024-11-20T19:27:13,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130893300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130893302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130893309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130893315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130893315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130893416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130893416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130893416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130893425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130893426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:13,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130893625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130893626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130893628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130893632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130893639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,710 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/fd798b8893384baea82b19ff7a475fb8 2024-11-20T19:27:13,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a197c3475072428ca69b5f23a1a16a3e is 50, key is test_row_0/B:col10/1732130832638/Put/seqid=0 2024-11-20T19:27:13,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742388_1564 (size=12151) 2024-11-20T19:27:13,758 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a197c3475072428ca69b5f23a1a16a3e 2024-11-20T19:27:13,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/78c38127061e450fb298b3ecf9aab7db is 50, key is 
test_row_0/C:col10/1732130832638/Put/seqid=0 2024-11-20T19:27:13,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742389_1565 (size=12151) 2024-11-20T19:27:13,797 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/78c38127061e450fb298b3ecf9aab7db 2024-11-20T19:27:13,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/fd798b8893384baea82b19ff7a475fb8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fd798b8893384baea82b19ff7a475fb8 2024-11-20T19:27:13,807 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fd798b8893384baea82b19ff7a475fb8, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T19:27:13,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a197c3475072428ca69b5f23a1a16a3e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a197c3475072428ca69b5f23a1a16a3e 2024-11-20T19:27:13,818 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a197c3475072428ca69b5f23a1a16a3e, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T19:27:13,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/78c38127061e450fb298b3ecf9aab7db as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/78c38127061e450fb298b3ecf9aab7db 2024-11-20T19:27:13,825 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/78c38127061e450fb298b3ecf9aab7db, entries=150, sequenceid=252, filesize=11.9 K 2024-11-20T19:27:13,826 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 794552c8bce342231c204cc0e02fbebc in 585ms, sequenceid=252, compaction requested=true 2024-11-20T19:27:13,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:13,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:13,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-20T19:27:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-20T19:27:13,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-20T19:27:13,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3530 sec 2024-11-20T19:27:13,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.3620 sec 2024-11-20T19:27:13,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:13,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:27:13,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:13,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:13,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:13,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:13,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a1a0bdf60c414b70939a330927ac16a5 is 50, key is test_row_0/A:col10/1732130833298/Put/seqid=0 2024-11-20T19:27:13,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130893957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130893960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130893962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130893967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742390_1566 (size=17181) 2024-11-20T19:27:13,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a1a0bdf60c414b70939a330927ac16a5 2024-11-20T19:27:13,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:13,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130893968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/5e88db0400dc4e779aad6e9395a9cc0d is 50, key is test_row_0/B:col10/1732130833298/Put/seqid=0 2024-11-20T19:27:14,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742391_1567 (size=12301) 2024-11-20T19:27:14,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130894069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130894074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130894076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130894077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130894082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130894277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130894282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130894284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130894288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130894289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/5e88db0400dc4e779aad6e9395a9cc0d 2024-11-20T19:27:14,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/0d3f7eea94f44e138884b62a3c58ed73 is 50, key is test_row_0/C:col10/1732130833298/Put/seqid=0 2024-11-20T19:27:14,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742392_1568 (size=12301) 2024-11-20T19:27:14,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/0d3f7eea94f44e138884b62a3c58ed73 2024-11-20T19:27:14,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a1a0bdf60c414b70939a330927ac16a5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a1a0bdf60c414b70939a330927ac16a5 2024-11-20T19:27:14,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a1a0bdf60c414b70939a330927ac16a5, entries=250, sequenceid=277, filesize=16.8 K 2024-11-20T19:27:14,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/5e88db0400dc4e779aad6e9395a9cc0d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5e88db0400dc4e779aad6e9395a9cc0d 2024-11-20T19:27:14,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5e88db0400dc4e779aad6e9395a9cc0d, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T19:27:14,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/0d3f7eea94f44e138884b62a3c58ed73 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/0d3f7eea94f44e138884b62a3c58ed73 2024-11-20T19:27:14,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/0d3f7eea94f44e138884b62a3c58ed73, entries=150, sequenceid=277, filesize=12.0 K 2024-11-20T19:27:14,532 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 794552c8bce342231c204cc0e02fbebc in 596ms, sequenceid=277, compaction requested=true 2024-11-20T19:27:14,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:14,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:14,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,532 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:14,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:14,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:14,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:14,532 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:27:14,532 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:14,535 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49164 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:14,535 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:14,535 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:14,535 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/b8b9a049924a4ee98cae88644ba0f5fe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/dbe493a6f1a94ceabb0bd31fc2845d80, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a197c3475072428ca69b5f23a1a16a3e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5e88db0400dc4e779aad6e9395a9cc0d] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=48.0 K 2024-11-20T19:27:14,535 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56434 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:14,535 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:14,535 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
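The RegionTooBusyException entries above are HRegion.checkResources rejecting mutations while the region's memstore is over its 512.0 K blocking limit; the writer is expected to back off and retry until the flush in progress drains the memstore. The following is a minimal, illustrative Java sketch of such a retry loop (it is not part of this log): the table, row, and family names are taken from the log records, the retry count and backoff values are arbitrary, and in practice the HBase client already performs comparable retrying internally.

// Illustrative sketch only: retrying a put rejected with RegionTooBusyException,
// as the TestAcidGuarantees writers above are expected to do while the memstore drains.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                // arbitrary starting backoff for this sketch
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);              // rejected while the region is over its
                    break;                       // memstore blocking limit (see WARN records above)
                } catch (RegionTooBusyException e) {
                    if (attempt == 10) throw e; // give up after a bounded number of attempts
                    Thread.sleep(backoffMs);    // back off so the in-flight flush can drain the memstore
                    backoffMs = Math.min(backoffMs * 2, 5000);
                }
            }
        }
    }
}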
2024-11-20T19:27:14,535 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8f2f1adaf5004771a4791df127b96aa2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4a36aeb844fd4aa68e62b7fb9a70ac3d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fd798b8893384baea82b19ff7a475fb8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a1a0bdf60c414b70939a330927ac16a5] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=55.1 K 2024-11-20T19:27:14,536 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8b9a049924a4ee98cae88644ba0f5fe, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732130831772 2024-11-20T19:27:14,536 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f2f1adaf5004771a4791df127b96aa2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1732130831772 2024-11-20T19:27:14,536 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbe493a6f1a94ceabb0bd31fc2845d80, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732130831943 2024-11-20T19:27:14,537 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a36aeb844fd4aa68e62b7fb9a70ac3d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732130831943 2024-11-20T19:27:14,537 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a197c3475072428ca69b5f23a1a16a3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732130832616 2024-11-20T19:27:14,537 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting fd798b8893384baea82b19ff7a475fb8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732130832616 2024-11-20T19:27:14,537 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e88db0400dc4e779aad6e9395a9cc0d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732130833298 2024-11-20T19:27:14,538 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a1a0bdf60c414b70939a330927ac16a5, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732130833298 2024-11-20T19:27:14,560 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:14,560 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/fcf53cc61b5e444b96531d91f78f1f88 is 50, key is test_row_0/A:col10/1732130833298/Put/seqid=0 2024-11-20T19:27:14,565 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#481 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:14,566 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/16d1493a542a4b689f3f29cdc469328b is 50, key is test_row_0/B:col10/1732130833298/Put/seqid=0 2024-11-20T19:27:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-20T19:27:14,578 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-20T19:27:14,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:14,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-20T19:27:14,580 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:14,580 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:14,580 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:14,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:14,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:14,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:14,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:14,597 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:14,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:14,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:14,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742393_1569 (size=12847) 2024-11-20T19:27:14,626 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/fcf53cc61b5e444b96531d91f78f1f88 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fcf53cc61b5e444b96531d91f78f1f88 2024-11-20T19:27:14,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742394_1570 (size=12847) 2024-11-20T19:27:14,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/808a8c692a5e4839bdeeb75687666aa7 is 50, key is test_row_0/A:col10/1732130833967/Put/seqid=0 2024-11-20T19:27:14,630 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into fcf53cc61b5e444b96531d91f78f1f88(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
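The pid=140 flush above was requested by the test client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") and, in this build, is executed by the master as a FlushTableProcedure with a FlushRegionProcedure subprocedure (pid=141). A minimal sketch (not part of this log) of issuing the same administrative flush, assuming a reachable cluster with the default client configuration:

// Illustrative sketch only: requesting a table flush the way the test client above does.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // In this log the request is handled by the master's FlushTableProcedure (pid=140);
            // a region server that is already flushing answers "NOT flushing ... as already flushing",
            // which is what the pid=141 failure records below show before the procedure is retried.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}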
2024-11-20T19:27:14,630 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:14,630 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=12, startTime=1732130834532; duration=0sec 2024-11-20T19:27:14,631 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:14,631 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:14,631 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:14,635 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49164 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:14,635 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:14,635 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:14,635 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/98a1e68d561544699636ce39017d8fa4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/23cd5a1809ac4c559055ac746145d3ce, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/78c38127061e450fb298b3ecf9aab7db, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/0d3f7eea94f44e138884b62a3c58ed73] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=48.0 K 2024-11-20T19:27:14,637 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/16d1493a542a4b689f3f29cdc469328b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/16d1493a542a4b689f3f29cdc469328b 2024-11-20T19:27:14,637 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 98a1e68d561544699636ce39017d8fa4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, 
earliestPutTs=1732130831772 2024-11-20T19:27:14,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 23cd5a1809ac4c559055ac746145d3ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732130831943 2024-11-20T19:27:14,639 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 78c38127061e450fb298b3ecf9aab7db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1732130832616 2024-11-20T19:27:14,639 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d3f7eea94f44e138884b62a3c58ed73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732130833298 2024-11-20T19:27:14,641 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 16d1493a542a4b689f3f29cdc469328b(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:14,642 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:14,642 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=12, startTime=1732130834532; duration=0sec 2024-11-20T19:27:14,642 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,642 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:14,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130894647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130894648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130894653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130894654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130894656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742395_1571 (size=14741) 2024-11-20T19:27:14,669 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#483 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:14,670 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/f47dfcdb4163416d944de27b7a870757 is 50, key is test_row_0/C:col10/1732130833298/Put/seqid=0 2024-11-20T19:27:14,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:14,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742396_1572 (size=12847) 2024-11-20T19:27:14,696 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/f47dfcdb4163416d944de27b7a870757 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/f47dfcdb4163416d944de27b7a870757 2024-11-20T19:27:14,702 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into f47dfcdb4163416d944de27b7a870757(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:14,702 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:14,702 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=12, startTime=1732130834532; duration=0sec 2024-11-20T19:27:14,703 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:14,703 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:14,732 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:14,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:14,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
as already flushing 2024-11-20T19:27:14,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:14,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:14,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:14,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:14,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130894764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130894764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130894766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130894767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130894767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:14,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:14,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:14,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:14,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:14,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:14,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:14,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130894967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130894967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130894973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130894974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:14,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:14,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130894974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:15,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/808a8c692a5e4839bdeeb75687666aa7 2024-11-20T19:27:15,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/0fa96fdb636448f591abce2c59dbbeaa is 50, key is test_row_0/B:col10/1732130833967/Put/seqid=0 2024-11-20T19:27:15,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742397_1573 (size=12301) 2024-11-20T19:27:15,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/0fa96fdb636448f591abce2c59dbbeaa 2024-11-20T19:27:15,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/552083c9c8394f4cb3f1807bafa85463 is 50, key is test_row_0/C:col10/1732130833967/Put/seqid=0 2024-11-20T19:27:15,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742398_1574 (size=12301) 2024-11-20T19:27:15,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=289 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/552083c9c8394f4cb3f1807bafa85463 2024-11-20T19:27:15,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/808a8c692a5e4839bdeeb75687666aa7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/808a8c692a5e4839bdeeb75687666aa7 2024-11-20T19:27:15,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:15,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/808a8c692a5e4839bdeeb75687666aa7, entries=200, sequenceid=289, filesize=14.4 K 2024-11-20T19:27:15,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/0fa96fdb636448f591abce2c59dbbeaa as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/0fa96fdb636448f591abce2c59dbbeaa 2024-11-20T19:27:15,193 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/0fa96fdb636448f591abce2c59dbbeaa, entries=150, sequenceid=289, filesize=12.0 K 2024-11-20T19:27:15,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/552083c9c8394f4cb3f1807bafa85463 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/552083c9c8394f4cb3f1807bafa85463 2024-11-20T19:27:15,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/552083c9c8394f4cb3f1807bafa85463, entries=150, sequenceid=289, filesize=12.0 K 2024-11-20T19:27:15,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 794552c8bce342231c204cc0e02fbebc in 612ms, sequenceid=289, compaction requested=false 2024-11-20T19:27:15,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:15,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:15,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-20T19:27:15,277 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:15,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,285 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/60601ba4860345bb9bd497a297429ca7 is 50, key is test_row_0/A:col10/1732130835276/Put/seqid=0 2024-11-20T19:27:15,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130895285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742399_1575 (size=14741) 2024-11-20T19:27:15,291 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130895287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,291 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/60601ba4860345bb9bd497a297429ca7 2024-11-20T19:27:15,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/4125b0ff3f494ef399f81b135f528540 is 50, key is test_row_0/B:col10/1732130835276/Put/seqid=0 2024-11-20T19:27:15,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130895288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130895289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130895290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742400_1576 (size=12301) 2024-11-20T19:27:15,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/4125b0ff3f494ef399f81b135f528540 2024-11-20T19:27:15,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/8194f8d2ad63498abbad355b71cc14e6 is 50, key is test_row_0/C:col10/1732130835276/Put/seqid=0 2024-11-20T19:27:15,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742401_1577 (size=12301) 2024-11-20T19:27:15,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
as already flushing 2024-11-20T19:27:15,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/8194f8d2ad63498abbad355b71cc14e6 2024-11-20T19:27:15,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/60601ba4860345bb9bd497a297429ca7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/60601ba4860345bb9bd497a297429ca7 2024-11-20T19:27:15,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/60601ba4860345bb9bd497a297429ca7, entries=200, sequenceid=318, filesize=14.4 K 2024-11-20T19:27:15,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/4125b0ff3f494ef399f81b135f528540 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4125b0ff3f494ef399f81b135f528540 2024-11-20T19:27:15,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4125b0ff3f494ef399f81b135f528540, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T19:27:15,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/8194f8d2ad63498abbad355b71cc14e6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8194f8d2ad63498abbad355b71cc14e6 2024-11-20T19:27:15,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8194f8d2ad63498abbad355b71cc14e6, entries=150, sequenceid=318, filesize=12.0 K 2024-11-20T19:27:15,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 794552c8bce342231c204cc0e02fbebc in 95ms, sequenceid=318, compaction requested=true 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:15,372 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:15,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:27:15,372 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:15,375 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42329 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:15,376 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction 
(all files) 2024-11-20T19:27:15,376 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:15,376 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:15,376 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,376 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,376 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fcf53cc61b5e444b96531d91f78f1f88, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/808a8c692a5e4839bdeeb75687666aa7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/60601ba4860345bb9bd497a297429ca7] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=41.3 K 2024-11-20T19:27:15,376 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/16d1493a542a4b689f3f29cdc469328b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/0fa96fdb636448f591abce2c59dbbeaa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4125b0ff3f494ef399f81b135f528540] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.6 K 2024-11-20T19:27:15,376 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 16d1493a542a4b689f3f29cdc469328b, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732130833298 2024-11-20T19:27:15,376 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting fcf53cc61b5e444b96531d91f78f1f88, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732130833298 2024-11-20T19:27:15,377 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 808a8c692a5e4839bdeeb75687666aa7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130833954 2024-11-20T19:27:15,377 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 0fa96fdb636448f591abce2c59dbbeaa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130833954 2024-11-20T19:27:15,377 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4125b0ff3f494ef399f81b135f528540, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732130834651 2024-11-20T19:27:15,377 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60601ba4860345bb9bd497a297429ca7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732130834651 2024-11-20T19:27:15,398 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#489 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:15,399 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/7fbaebae81014543838c4947bd7893c0 is 50, key is test_row_0/B:col10/1732130835276/Put/seqid=0 2024-11-20T19:27:15,399 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:15,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:15,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:15,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:15,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:15,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:15,405 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:15,405 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/47ef50167c3c4d60adc4459bd6b25f55 is 50, key is test_row_0/A:col10/1732130835276/Put/seqid=0 2024-11-20T19:27:15,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/7275bbcac7514c94be952ad58d955938 is 50, key is test_row_0/A:col10/1732130835289/Put/seqid=0 2024-11-20T19:27:15,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742402_1578 (size=12949) 2024-11-20T19:27:15,459 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/7fbaebae81014543838c4947bd7893c0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7fbaebae81014543838c4947bd7893c0 2024-11-20T19:27:15,466 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 7fbaebae81014543838c4947bd7893c0(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:15,466 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:15,466 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130835372; duration=0sec 2024-11-20T19:27:15,466 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:15,466 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:15,466 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:15,468 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37449 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:15,468 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:15,468 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,468 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/f47dfcdb4163416d944de27b7a870757, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/552083c9c8394f4cb3f1807bafa85463, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8194f8d2ad63498abbad355b71cc14e6] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.6 K 2024-11-20T19:27:15,469 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting f47dfcdb4163416d944de27b7a870757, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732130833298 2024-11-20T19:27:15,469 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 552083c9c8394f4cb3f1807bafa85463, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732130833954 2024-11-20T19:27:15,470 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8194f8d2ad63498abbad355b71cc14e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732130834651 2024-11-20T19:27:15,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130895463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130895466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130895467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130895464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130895469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742403_1579 (size=12949) 2024-11-20T19:27:15,492 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/47ef50167c3c4d60adc4459bd6b25f55 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/47ef50167c3c4d60adc4459bd6b25f55 2024-11-20T19:27:15,496 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 47ef50167c3c4d60adc4459bd6b25f55(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:15,496 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:15,496 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130835372; duration=0sec 2024-11-20T19:27:15,496 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:15,496 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:15,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:15,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:15,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,500 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#492 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:15,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:15,501 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/14425ecb2e4543128a01cf1e52659ab1 is 50, key is test_row_0/C:col10/1732130835276/Put/seqid=0 2024-11-20T19:27:15,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742404_1580 (size=12301) 2024-11-20T19:27:15,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742405_1581 (size=12949) 2024-11-20T19:27:15,567 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/14425ecb2e4543128a01cf1e52659ab1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/14425ecb2e4543128a01cf1e52659ab1 2024-11-20T19:27:15,578 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into 14425ecb2e4543128a01cf1e52659ab1(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:15,578 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:15,578 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130835372; duration=0sec 2024-11-20T19:27:15,578 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:15,578 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:15,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130895580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130895580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130895580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130895581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130895581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,652 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:15,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:15,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130895787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130895787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130895787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130895788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:15,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130895788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,805 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:15,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/7275bbcac7514c94be952ad58d955938 2024-11-20T19:27:15,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a5a041e90fd6435887d1591b6a41ff98 is 50, key is test_row_0/B:col10/1732130835289/Put/seqid=0 2024-11-20T19:27:15,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742406_1582 (size=12301) 2024-11-20T19:27:15,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a5a041e90fd6435887d1591b6a41ff98 2024-11-20T19:27:15,958 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:15,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:15,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:15,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:15,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:15,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:15,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:15,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:15,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/3df8d501ef3d4c6ea78032c8184d41ee is 50, key is test_row_0/C:col10/1732130835289/Put/seqid=0 2024-11-20T19:27:16,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742407_1583 (size=12301) 2024-11-20T19:27:16,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/3df8d501ef3d4c6ea78032c8184d41ee 2024-11-20T19:27:16,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/7275bbcac7514c94be952ad58d955938 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7275bbcac7514c94be952ad58d955938 2024-11-20T19:27:16,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7275bbcac7514c94be952ad58d955938, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T19:27:16,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a5a041e90fd6435887d1591b6a41ff98 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a5a041e90fd6435887d1591b6a41ff98 2024-11-20T19:27:16,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a5a041e90fd6435887d1591b6a41ff98, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T19:27:16,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/3df8d501ef3d4c6ea78032c8184d41ee as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/3df8d501ef3d4c6ea78032c8184d41ee 2024-11-20T19:27:16,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/3df8d501ef3d4c6ea78032c8184d41ee, entries=150, sequenceid=329, filesize=12.0 K 2024-11-20T19:27:16,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 794552c8bce342231c204cc0e02fbebc in 637ms, sequenceid=329, 
compaction requested=false 2024-11-20T19:27:16,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:16,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:16,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:27:16,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:16,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:16,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:16,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/40b60e280c2247a3b0f6f5283b447d08 is 50, key is test_row_0/A:col10/1732130835464/Put/seqid=0 2024-11-20T19:27:16,110 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:16,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:16,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130896103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130896106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130896106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130896112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130896115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742408_1584 (size=14741) 2024-11-20T19:27:16,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/40b60e280c2247a3b0f6f5283b447d08 2024-11-20T19:27:16,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/38f5c0133bee4ac9b54b5d82e26d2b87 is 50, key is test_row_0/B:col10/1732130835464/Put/seqid=0 2024-11-20T19:27:16,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742409_1585 (size=12301) 2024-11-20T19:27:16,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/38f5c0133bee4ac9b54b5d82e26d2b87 2024-11-20T19:27:16,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/b33df585a7644c33b4434fe494243676 is 50, key is test_row_0/C:col10/1732130835464/Put/seqid=0 2024-11-20T19:27:16,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742410_1586 (size=12301) 2024-11-20T19:27:16,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130896215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130896216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130896216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130896216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130896223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,264 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,265 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:16,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
as already flushing 2024-11-20T19:27:16,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130896420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130896420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130896422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130896423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130896434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:16,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:16,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:16,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/b33df585a7644c33b4434fe494243676 2024-11-20T19:27:16,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/40b60e280c2247a3b0f6f5283b447d08 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40b60e280c2247a3b0f6f5283b447d08 2024-11-20T19:27:16,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40b60e280c2247a3b0f6f5283b447d08, entries=200, sequenceid=359, filesize=14.4 K 2024-11-20T19:27:16,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/38f5c0133bee4ac9b54b5d82e26d2b87 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/38f5c0133bee4ac9b54b5d82e26d2b87 2024-11-20T19:27:16,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/38f5c0133bee4ac9b54b5d82e26d2b87, entries=150, 
sequenceid=359, filesize=12.0 K 2024-11-20T19:27:16,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/b33df585a7644c33b4434fe494243676 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b33df585a7644c33b4434fe494243676 2024-11-20T19:27:16,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b33df585a7644c33b4434fe494243676, entries=150, sequenceid=359, filesize=12.0 K 2024-11-20T19:27:16,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 794552c8bce342231c204cc0e02fbebc in 540ms, sequenceid=359, compaction requested=true 2024-11-20T19:27:16,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:16,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:16,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:16,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:16,636 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:16,636 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:16,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-20T19:27:16,637 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:16,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:16,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:16,638 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in 
TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,638 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7fbaebae81014543838c4947bd7893c0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a5a041e90fd6435887d1591b6a41ff98, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/38f5c0133bee4ac9b54b5d82e26d2b87] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.7 K 2024-11-20T19:27:16,638 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:16,638 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:16,639 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,639 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/47ef50167c3c4d60adc4459bd6b25f55, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7275bbcac7514c94be952ad58d955938, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40b60e280c2247a3b0f6f5283b447d08] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=39.1 K 2024-11-20T19:27:16,639 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fbaebae81014543838c4947bd7893c0, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732130834651 2024-11-20T19:27:16,639 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47ef50167c3c4d60adc4459bd6b25f55, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732130834651 2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7275bbcac7514c94be952ad58d955938, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130835288 2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a5a041e90fd6435887d1591b6a41ff98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130835288 
2024-11-20T19:27:16,640 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40b60e280c2247a3b0f6f5283b447d08, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732130835464 2024-11-20T19:27:16,641 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 38f5c0133bee4ac9b54b5d82e26d2b87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732130835464 2024-11-20T19:27:16,651 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#498 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:16,652 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/b2787bee6c874178b0cf237f09899236 is 50, key is test_row_0/A:col10/1732130835464/Put/seqid=0 2024-11-20T19:27:16,664 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:16,664 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/5f30c5fa484646a58b051a79d377998d is 50, key is test_row_0/B:col10/1732130835464/Put/seqid=0 2024-11-20T19:27:16,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:16,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742411_1587 (size=13051) 2024-11-20T19:27:16,704 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/b2787bee6c874178b0cf237f09899236 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/b2787bee6c874178b0cf237f09899236 2024-11-20T19:27:16,709 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into b2787bee6c874178b0cf237f09899236(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:16,710 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:16,710 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130836636; duration=0sec 2024-11-20T19:27:16,710 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:16,710 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:16,710 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:16,711 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:16,711 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:16,711 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,711 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/14425ecb2e4543128a01cf1e52659ab1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/3df8d501ef3d4c6ea78032c8184d41ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b33df585a7644c33b4434fe494243676] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.7 K 2024-11-20T19:27:16,712 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14425ecb2e4543128a01cf1e52659ab1, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732130834651 2024-11-20T19:27:16,713 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3df8d501ef3d4c6ea78032c8184d41ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732130835288 2024-11-20T19:27:16,714 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b33df585a7644c33b4434fe494243676, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732130835464 2024-11-20T19:27:16,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:16,724 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:16,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:16,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742412_1588 (size=13051) 2024-11-20T19:27:16,734 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#500 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:16,734 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d86b1a2aa5cb4a388e79d60e86e249b3 is 50, key is test_row_0/C:col10/1732130835464/Put/seqid=0 2024-11-20T19:27:16,738 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/5f30c5fa484646a58b051a79d377998d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5f30c5fa484646a58b051a79d377998d 2024-11-20T19:27:16,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/40579fa6e69d45848edeb38873ddee20 is 50, key is test_row_0/A:col10/1732130836111/Put/seqid=0 2024-11-20T19:27:16,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:16,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:16,743 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 5f30c5fa484646a58b051a79d377998d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:16,743 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:16,744 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130836636; duration=0sec 2024-11-20T19:27:16,744 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:16,744 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:16,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742414_1590 (size=12301) 2024-11-20T19:27:16,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742413_1589 (size=13051) 2024-11-20T19:27:16,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130896785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130896792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130896793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130896797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130896797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130896898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130896901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130896901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130896908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:16,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:16,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130896908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130897107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130897109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130897109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130897112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130897117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,191 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/40579fa6e69d45848edeb38873ddee20 2024-11-20T19:27:17,198 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d86b1a2aa5cb4a388e79d60e86e249b3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d86b1a2aa5cb4a388e79d60e86e249b3 2024-11-20T19:27:17,202 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into d86b1a2aa5cb4a388e79d60e86e249b3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
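The repeated RegionTooBusyException entries above are HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit (512.0 K here); the stock HBase client treats this exception as retryable and backs off on its own. The loop below is only a hand-rolled sketch of the same retry-with-backoff idea against the table named in this log — the column family, qualifier, value, and backoff numbers are assumptions for illustration, not values taken from this test.

// Hypothetical sketch: retry a Put with exponential backoff when the region
// reports RegionTooBusyException (memstore over its blocking limit).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v")); // assumed cell
      long backoffMs = 100;                      // assumed starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                        // write accepted
          break;
        } catch (RegionTooBusyException e) {     // region over its memstore blocking limit
          Thread.sleep(backoffMs);               // give the flush time to complete
          backoffMs *= 2;                        // exponential backoff
        }
      }
    }
  }
}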
2024-11-20T19:27:17,202 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:17,202 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130836637; duration=0sec 2024-11-20T19:27:17,203 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:17,203 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:17,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/30c9fbf2e48b4c5092c498908151216e is 50, key is test_row_0/B:col10/1732130836111/Put/seqid=0 2024-11-20T19:27:17,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742415_1591 (size=12301) 2024-11-20T19:27:17,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130897413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130897414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130897420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130897423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130897423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,647 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/30c9fbf2e48b4c5092c498908151216e 2024-11-20T19:27:17,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/aa340115c5444ff183c9a0a8e198883c is 50, key is test_row_0/C:col10/1732130836111/Put/seqid=0 2024-11-20T19:27:17,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742416_1592 (size=12301) 2024-11-20T19:27:17,716 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/aa340115c5444ff183c9a0a8e198883c 2024-11-20T19:27:17,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/40579fa6e69d45848edeb38873ddee20 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40579fa6e69d45848edeb38873ddee20 2024-11-20T19:27:17,726 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40579fa6e69d45848edeb38873ddee20, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T19:27:17,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/30c9fbf2e48b4c5092c498908151216e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/30c9fbf2e48b4c5092c498908151216e 2024-11-20T19:27:17,732 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/30c9fbf2e48b4c5092c498908151216e, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T19:27:17,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/aa340115c5444ff183c9a0a8e198883c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/aa340115c5444ff183c9a0a8e198883c 2024-11-20T19:27:17,739 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/aa340115c5444ff183c9a0a8e198883c, entries=150, sequenceid=370, filesize=12.0 K 2024-11-20T19:27:17,740 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 794552c8bce342231c204cc0e02fbebc in 1016ms, sequenceid=370, compaction requested=false 2024-11-20T19:27:17,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:17,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
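The "Over memstore limit=512.0 K" figure in the exceptions above is the region's blocking threshold, computed as hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; a 512 K limit implies the test runs with a flush size far below the 128 MB production default, which is why flushes like the one just completed keep racing the incoming writes. The sketch below only demonstrates that arithmetic with the two real configuration keys — the 128 K / 4 values are assumptions chosen to reproduce 512 K, not values read from this test's configuration.

// Hedged sketch: the blocking limit behind RegionTooBusyException is
// flush size * block multiplier; values below are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // assumed; production default is 128 MB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512.0 K
  }
}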
2024-11-20T19:27:17,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-20T19:27:17,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-20T19:27:17,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-20T19:27:17,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1610 sec 2024-11-20T19:27:17,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 3.1630 sec 2024-11-20T19:27:17,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:17,926 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-20T19:27:17,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:17,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:17,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:17,927 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:17,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1e8d3999da5f482ab608564b44d24bd9 is 50, key is test_row_0/A:col10/1732130837925/Put/seqid=0 2024-11-20T19:27:17,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742417_1593 (size=14741) 2024-11-20T19:27:17,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1e8d3999da5f482ab608564b44d24bd9 2024-11-20T19:27:17,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130897933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130897934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130897935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/65508f24de784e9fbc090eebdd6c7c92 is 50, key is test_row_0/B:col10/1732130837925/Put/seqid=0 2024-11-20T19:27:17,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130897938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130897941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:17,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742418_1594 (size=12301) 2024-11-20T19:27:17,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/65508f24de784e9fbc090eebdd6c7c92 2024-11-20T19:27:17,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/58732d1f03d5460992bd5a25dcae8797 is 50, key is test_row_0/C:col10/1732130837925/Put/seqid=0 2024-11-20T19:27:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742419_1595 (size=12301) 2024-11-20T19:27:18,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/58732d1f03d5460992bd5a25dcae8797 2024-11-20T19:27:18,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1e8d3999da5f482ab608564b44d24bd9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8d3999da5f482ab608564b44d24bd9 2024-11-20T19:27:18,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8d3999da5f482ab608564b44d24bd9, entries=200, sequenceid=399, filesize=14.4 K 2024-11-20T19:27:18,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/65508f24de784e9fbc090eebdd6c7c92 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/65508f24de784e9fbc090eebdd6c7c92 2024-11-20T19:27:18,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/65508f24de784e9fbc090eebdd6c7c92, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T19:27:18,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/58732d1f03d5460992bd5a25dcae8797 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/58732d1f03d5460992bd5a25dcae8797 2024-11-20T19:27:18,037 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/58732d1f03d5460992bd5a25dcae8797, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T19:27:18,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 794552c8bce342231c204cc0e02fbebc in 112ms, sequenceid=399, compaction requested=true 2024-11-20T19:27:18,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:18,038 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:18,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,041 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,041 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:18,041 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:18,041 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/b2787bee6c874178b0cf237f09899236, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40579fa6e69d45848edeb38873ddee20, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8d3999da5f482ab608564b44d24bd9] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=39.2 K 2024-11-20T19:27:18,042 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,042 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2787bee6c874178b0cf237f09899236, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732130835464 2024-11-20T19:27:18,042 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40579fa6e69d45848edeb38873ddee20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732130836102 2024-11-20T19:27:18,043 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e8d3999da5f482ab608564b44d24bd9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130836791 2024-11-20T19:27:18,043 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,043 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:18,043 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
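The compaction entries above show ExploringCompactionPolicy picking up all three freshly flushed files per store once the minimum file count (hbase.hstore.compactionThreshold, default 3) is reached, with CompactSplit then executing each selection asynchronously. As a hedged illustration, a compaction can also be requested explicitly through the Admin API; the table name below is the one in this log, everything else is assumed.

// Hedged sketch: queue a compaction request for the test table via the Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Queues a minor compaction; the region server runs it asynchronously,
      // as in the CompactSplit/ExploringCompactionPolicy entries above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
      // admin.majorCompact(TableName.valueOf("TestAcidGuarantees")); // force a full rewrite instead
    }
  }
}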
2024-11-20T19:27:18,043 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5f30c5fa484646a58b051a79d377998d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/30c9fbf2e48b4c5092c498908151216e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/65508f24de784e9fbc090eebdd6c7c92] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.8 K 2024-11-20T19:27:18,044 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f30c5fa484646a58b051a79d377998d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732130835464 2024-11-20T19:27:18,044 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 30c9fbf2e48b4c5092c498908151216e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732130836102 2024-11-20T19:27:18,044 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 65508f24de784e9fbc090eebdd6c7c92, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130836794 2024-11-20T19:27:18,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:18,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:18,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:18,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,057 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:18,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:18,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:18,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING 
TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:18,059 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,070 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#507 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,070 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4ec784b9d890405e8717aed821028640 is 50, key is test_row_0/A:col10/1732130837925/Put/seqid=0 2024-11-20T19:27:18,071 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#508 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,071 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/ea461df827d249d28ce0550fd55dfa97 is 50, key is test_row_0/B:col10/1732130837925/Put/seqid=0 2024-11-20T19:27:18,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/c68d0c588f2340dc8850c4400078cf84 is 50, key is test_row_0/A:col10/1732130838054/Put/seqid=0 2024-11-20T19:27:18,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742420_1596 (size=13153) 2024-11-20T19:27:18,120 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/4ec784b9d890405e8717aed821028640 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ec784b9d890405e8717aed821028640 2024-11-20T19:27:18,125 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 4ec784b9d890405e8717aed821028640(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:18,125 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:18,125 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130838038; duration=0sec 2024-11-20T19:27:18,126 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,126 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:18,126 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,127 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,127 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:18,127 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:18,127 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d86b1a2aa5cb4a388e79d60e86e249b3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/aa340115c5444ff183c9a0a8e198883c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/58732d1f03d5460992bd5a25dcae8797] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.8 K 2024-11-20T19:27:18,127 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d86b1a2aa5cb4a388e79d60e86e249b3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732130835464 2024-11-20T19:27:18,127 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa340115c5444ff183c9a0a8e198883c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1732130836102 2024-11-20T19:27:18,128 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58732d1f03d5460992bd5a25dcae8797, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130836794 2024-11-20T19:27:18,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46289 is added to blk_1073742421_1597 (size=13153) 2024-11-20T19:27:18,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130898134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,153 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/ea461df827d249d28ce0550fd55dfa97 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ea461df827d249d28ce0550fd55dfa97 2024-11-20T19:27:18,159 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into ea461df827d249d28ce0550fd55dfa97(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:18,159 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:18,159 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130838040; duration=0sec 2024-11-20T19:27:18,159 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,159 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:18,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130898149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130898147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742422_1598 (size=12301) 2024-11-20T19:27:18,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/c68d0c588f2340dc8850c4400078cf84 2024-11-20T19:27:18,184 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,184 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/af9baaba0ec348878f6be4e93900c6c2 is 50, key is test_row_0/C:col10/1732130837925/Put/seqid=0 2024-11-20T19:27:18,192 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/90e60deb29834d689ed050becd1e70bd is 50, key is test_row_0/B:col10/1732130838054/Put/seqid=0 2024-11-20T19:27:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742423_1599 (size=13153) 2024-11-20T19:27:18,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742424_1600 (size=12301) 2024-11-20T19:27:18,252 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/90e60deb29834d689ed050becd1e70bd 2024-11-20T19:27:18,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130898252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/08f32b0081854acd9dda47a8113aa0f8 is 50, key is test_row_0/C:col10/1732130838054/Put/seqid=0 2024-11-20T19:27:18,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130898271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130898271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742425_1601 (size=12301) 2024-11-20T19:27:18,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130898457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130898480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130898481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,625 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/af9baaba0ec348878f6be4e93900c6c2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/af9baaba0ec348878f6be4e93900c6c2 2024-11-20T19:27:18,631 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into af9baaba0ec348878f6be4e93900c6c2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:18,631 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:18,631 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130838057; duration=0sec 2024-11-20T19:27:18,632 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,632 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:18,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-20T19:27:18,690 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-20T19:27:18,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:18,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/08f32b0081854acd9dda47a8113aa0f8 2024-11-20T19:27:18,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-20T19:27:18,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:18,699 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:18,699 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:18,699 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:18,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/c68d0c588f2340dc8850c4400078cf84 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c68d0c588f2340dc8850c4400078cf84 2024-11-20T19:27:18,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c68d0c588f2340dc8850c4400078cf84, entries=150, sequenceid=411, filesize=12.0 K 2024-11-20T19:27:18,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/90e60deb29834d689ed050becd1e70bd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/90e60deb29834d689ed050becd1e70bd 2024-11-20T19:27:18,715 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/90e60deb29834d689ed050becd1e70bd, entries=150, sequenceid=411, filesize=12.0 K 2024-11-20T19:27:18,716 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/08f32b0081854acd9dda47a8113aa0f8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/08f32b0081854acd9dda47a8113aa0f8 2024-11-20T19:27:18,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/08f32b0081854acd9dda47a8113aa0f8, entries=150, sequenceid=411, filesize=12.0 K 2024-11-20T19:27:18,726 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 794552c8bce342231c204cc0e02fbebc in 669ms, sequenceid=411, compaction requested=false 2024-11-20T19:27:18,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:18,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:18,771 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:18,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,775 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a3b12fa5a1a549aa95ee60c548857505 is 50, key is test_row_0/A:col10/1732130838770/Put/seqid=0 2024-11-20T19:27:18,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:18,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742426_1602 (size=14741) 2024-11-20T19:27:18,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a3b12fa5a1a549aa95ee60c548857505 2024-11-20T19:27:18,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130898801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/87ac6b64e2e342369d9cec25efd384ad is 50, key is test_row_0/B:col10/1732130838770/Put/seqid=0 2024-11-20T19:27:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130898806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130898806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:18,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:18,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:18,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:18,850 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:18,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:18,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742427_1603 (size=12301) 2024-11-20T19:27:18,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/87ac6b64e2e342369d9cec25efd384ad 2024-11-20T19:27:18,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/b1b29ba894924fe19e2c66f87f135481 is 50, key is test_row_0/C:col10/1732130838770/Put/seqid=0 2024-11-20T19:27:18,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742428_1604 (size=12301) 2024-11-20T19:27:18,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=439 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/b1b29ba894924fe19e2c66f87f135481 2024-11-20T19:27:18,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/a3b12fa5a1a549aa95ee60c548857505 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a3b12fa5a1a549aa95ee60c548857505 2024-11-20T19:27:18,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130898912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130898914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:18,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130898914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:18,923 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a3b12fa5a1a549aa95ee60c548857505, entries=200, sequenceid=439, filesize=14.4 K 2024-11-20T19:27:18,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/87ac6b64e2e342369d9cec25efd384ad as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/87ac6b64e2e342369d9cec25efd384ad 2024-11-20T19:27:18,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/87ac6b64e2e342369d9cec25efd384ad, entries=150, sequenceid=439, filesize=12.0 K 2024-11-20T19:27:18,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/b1b29ba894924fe19e2c66f87f135481 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b1b29ba894924fe19e2c66f87f135481 2024-11-20T19:27:18,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b1b29ba894924fe19e2c66f87f135481, entries=150, sequenceid=439, filesize=12.0 K 2024-11-20T19:27:18,939 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 794552c8bce342231c204cc0e02fbebc in 167ms, sequenceid=439, 
compaction requested=true 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,939 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:18,939 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:18,939 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:18,941 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,941 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:18,941 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:18,942 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ec784b9d890405e8717aed821028640, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c68d0c588f2340dc8850c4400078cf84, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a3b12fa5a1a549aa95ee60c548857505] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=39.3 K 2024-11-20T19:27:18,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:18,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:18,942 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:18,942 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ea461df827d249d28ce0550fd55dfa97, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/90e60deb29834d689ed050becd1e70bd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/87ac6b64e2e342369d9cec25efd384ad] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.9 K 2024-11-20T19:27:18,942 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ea461df827d249d28ce0550fd55dfa97, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130836794 2024-11-20T19:27:18,942 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ec784b9d890405e8717aed821028640, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130836794 2024-11-20T19:27:18,943 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 90e60deb29834d689ed050becd1e70bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732130837936 2024-11-20T19:27:18,943 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c68d0c588f2340dc8850c4400078cf84, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732130837936 2024-11-20T19:27:18,943 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 87ac6b64e2e342369d9cec25efd384ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732130838133 2024-11-20T19:27:18,943 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3b12fa5a1a549aa95ee60c548857505, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732130838132 2024-11-20T19:27:18,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:18,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:18,956 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#516 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,956 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6d5fa825edf34db9b3877ed4ed81386a is 50, key is test_row_0/B:col10/1732130838770/Put/seqid=0 2024-11-20T19:27:18,962 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#517 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:18,962 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/8fb030c10ab049dfa4cb62b1b8bb97e0 is 50, key is test_row_0/A:col10/1732130838770/Put/seqid=0 2024-11-20T19:27:18,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/79cefdf85bd34fc58880b88763452b1d is 50, key is test_row_0/A:col10/1732130838944/Put/seqid=0 2024-11-20T19:27:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:19,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742429_1605 (size=13255) 2024-11-20T19:27:19,003 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,006 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6d5fa825edf34db9b3877ed4ed81386a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d5fa825edf34db9b3877ed4ed81386a 2024-11-20T19:27:19,011 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 6d5fa825edf34db9b3877ed4ed81386a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:19,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,011 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130838939; duration=0sec 2024-11-20T19:27:19,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:19,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:19,011 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:19,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742430_1606 (size=13255) 2024-11-20T19:27:19,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742431_1607 (size=12301) 2024-11-20T19:27:19,019 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:19,019 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:19,019 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:19,019 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/8fb030c10ab049dfa4cb62b1b8bb97e0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8fb030c10ab049dfa4cb62b1b8bb97e0 2024-11-20T19:27:19,019 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/af9baaba0ec348878f6be4e93900c6c2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/08f32b0081854acd9dda47a8113aa0f8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b1b29ba894924fe19e2c66f87f135481] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=36.9 K 2024-11-20T19:27:19,020 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting af9baaba0ec348878f6be4e93900c6c2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130836794 2024-11-20T19:27:19,020 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 08f32b0081854acd9dda47a8113aa0f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732130837936 2024-11-20T19:27:19,021 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting b1b29ba894924fe19e2c66f87f135481, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732130838133 2024-11-20T19:27:19,024 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 8fb030c10ab049dfa4cb62b1b8bb97e0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
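
The RegionTooBusyException entries scattered through these records report "Over memstore limit=512.0 K": HRegion.checkResources rejects writes once a region's memstore exceeds its blocking size, which is the configured flush size multiplied by the block multiplier. A minimal sketch of the two settings involved follows; the keys are standard HBase configuration properties, but the values shown are placeholders and not the small limits this test evidently runs with:

    // Illustrative only: the two settings that determine the blocking memstore
    // size reported in the RegionTooBusyException messages.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold, in bytes (placeholder value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are blocked once the memstore reaches flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
      }
    }
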
2024-11-20T19:27:19,024 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,024 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130838939; duration=0sec 2024-11-20T19:27:19,024 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,024 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:19,027 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/79cefdf85bd34fc58880b88763452b1d 2024-11-20T19:27:19,035 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/93bfe6fea12f47948dd7f14700dda061 is 50, key is test_row_0/B:col10/1732130838944/Put/seqid=0 2024-11-20T19:27:19,036 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:19,037 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/2c4471b6497142048ff6aa9deb2d9338 is 50, key is test_row_0/C:col10/1732130838770/Put/seqid=0 2024-11-20T19:27:19,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130899037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130899041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742432_1608 (size=12301) 2024-11-20T19:27:19,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/93bfe6fea12f47948dd7f14700dda061 2024-11-20T19:27:19,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742433_1609 (size=13255) 2024-11-20T19:27:19,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/fde096403d0e4be58740488ecb700e2e is 50, key is test_row_0/C:col10/1732130838944/Put/seqid=0 2024-11-20T19:27:19,106 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/2c4471b6497142048ff6aa9deb2d9338 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2c4471b6497142048ff6aa9deb2d9338 2024-11-20T19:27:19,114 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into 2c4471b6497142048ff6aa9deb2d9338(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
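
Each rejected Mutate call above is returned to the caller as a RegionTooBusyException. The HBase client normally retries such calls on its own, but a simplified, hypothetical retry loop makes the behaviour explicit; the row, family and qualifier below mirror the log (test_row_0, A:col10), while the value and retry policy are placeholders:

    // Hypothetical sketch: explicit retry of a put that may be rejected while
    // the region's memstore is over its blocking limit.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithRetry {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                               // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(100L * (attempt + 1));  // back off while the flush catches up
            }
          }
        }
      }
    }
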
2024-11-20T19:27:19,114 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,114 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130838939; duration=0sec 2024-11-20T19:27:19,114 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,114 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:19,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742434_1610 (size=12301) 2024-11-20T19:27:19,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130899122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130899123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130899123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/fde096403d0e4be58740488ecb700e2e 2024-11-20T19:27:19,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/79cefdf85bd34fc58880b88763452b1d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/79cefdf85bd34fc58880b88763452b1d 2024-11-20T19:27:19,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/79cefdf85bd34fc58880b88763452b1d, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T19:27:19,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/93bfe6fea12f47948dd7f14700dda061 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/93bfe6fea12f47948dd7f14700dda061 2024-11-20T19:27:19,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/93bfe6fea12f47948dd7f14700dda061, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T19:27:19,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130899145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/fde096403d0e4be58740488ecb700e2e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fde096403d0e4be58740488ecb700e2e 2024-11-20T19:27:19,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fde096403d0e4be58740488ecb700e2e, entries=150, sequenceid=451, filesize=12.0 K 2024-11-20T19:27:19,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 794552c8bce342231c204cc0e02fbebc in 209ms, sequenceid=451, compaction requested=false 2024-11-20T19:27:19,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:19,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:19,156 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
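
The repeated "Executing remote procedure ... FlushRegionCallable, pid=143" records show the master re-dispatching the same flush procedure while the MemStoreFlusher already has the region mid-flush, so each attempt ends in "Unable to complete flush ... as already flushing" until the in-progress flush finishes. Such a procedure is the kind produced by an explicit flush request; whether this test issues it through the Admin API is an assumption (the log does not show the caller), but a minimal sketch of that call looks like this:

    // Hypothetical sketch: an explicit flush request of the kind the
    // FlushRegionCallable above is servicing.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Requests a flush of all regions of the table; the master drives the
          // per-region flush procedures on the region servers.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
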
2024-11-20T19:27:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:19,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/9eeae91e830141f78b7001eab008a7ab is 50, key is test_row_0/A:col10/1732130839155/Put/seqid=0 2024-11-20T19:27:19,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742435_1611 (size=14741) 2024-11-20T19:27:19,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/9eeae91e830141f78b7001eab008a7ab 2024-11-20T19:27:19,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/27d99720aa71462d81b3e2ac366d9c17 is 50, key is test_row_0/B:col10/1732130839155/Put/seqid=0 2024-11-20T19:27:19,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742436_1612 (size=12301) 2024-11-20T19:27:19,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/27d99720aa71462d81b3e2ac366d9c17 2024-11-20T19:27:19,207 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/896d802a660e41d0bd20dd8fe63d07ad is 50, key is test_row_0/C:col10/1732130839155/Put/seqid=0 2024-11-20T19:27:19,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130899207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742437_1613 (size=12301) 2024-11-20T19:27:19,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:19,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130899316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130899352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130899429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130899429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130899437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,461 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130899519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,613 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=479 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/896d802a660e41d0bd20dd8fe63d07ad 2024-11-20T19:27:19,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/9eeae91e830141f78b7001eab008a7ab as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/9eeae91e830141f78b7001eab008a7ab 2024-11-20T19:27:19,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/9eeae91e830141f78b7001eab008a7ab, entries=200, sequenceid=479, filesize=14.4 K 2024-11-20T19:27:19,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/27d99720aa71462d81b3e2ac366d9c17 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/27d99720aa71462d81b3e2ac366d9c17 2024-11-20T19:27:19,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/27d99720aa71462d81b3e2ac366d9c17, entries=150, sequenceid=479, filesize=12.0 K 2024-11-20T19:27:19,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/896d802a660e41d0bd20dd8fe63d07ad as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/896d802a660e41d0bd20dd8fe63d07ad 2024-11-20T19:27:19,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/896d802a660e41d0bd20dd8fe63d07ad, entries=150, sequenceid=479, filesize=12.0 K 2024-11-20T19:27:19,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 794552c8bce342231c204cc0e02fbebc in 487ms, sequenceid=479, compaction requested=true 2024-11-20T19:27:19,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,643 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:19,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:19,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,644 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:19,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:19,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:19,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:19,644 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40297 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:19,644 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:19,644 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:19,644 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8fb030c10ab049dfa4cb62b1b8bb97e0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/79cefdf85bd34fc58880b88763452b1d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/9eeae91e830141f78b7001eab008a7ab] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=39.4 K 2024-11-20T19:27:19,649 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fb030c10ab049dfa4cb62b1b8bb97e0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732130838133 2024-11-20T19:27:19,650 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79cefdf85bd34fc58880b88763452b1d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732130838804 2024-11-20T19:27:19,650 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:19,650 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:19,650 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:19,650 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d5fa825edf34db9b3877ed4ed81386a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/93bfe6fea12f47948dd7f14700dda061, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/27d99720aa71462d81b3e2ac366d9c17] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=37.0 K 2024-11-20T19:27:19,650 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9eeae91e830141f78b7001eab008a7ab, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732130839031 2024-11-20T19:27:19,650 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d5fa825edf34db9b3877ed4ed81386a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732130838133 2024-11-20T19:27:19,650 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 93bfe6fea12f47948dd7f14700dda061, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732130838804 2024-11-20T19:27:19,651 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 27d99720aa71462d81b3e2ac366d9c17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732130839036 2024-11-20T19:27:19,658 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#525 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:19,658 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/476fddb541c04b5c8f8ccf2b03787c59 is 50, key is test_row_0/A:col10/1732130839155/Put/seqid=0 2024-11-20T19:27:19,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:19,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:19,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,668 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:19,669 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/7c906416081f40e299545bb33fd0d7ec is 50, key is test_row_0/B:col10/1732130839155/Put/seqid=0 2024-11-20T19:27:19,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/c856325c50f24bc9af19bf1072d87609 is 50, key is test_row_0/A:col10/1732130839203/Put/seqid=0 2024-11-20T19:27:19,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742438_1614 (size=13357) 2024-11-20T19:27:19,729 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/476fddb541c04b5c8f8ccf2b03787c59 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/476fddb541c04b5c8f8ccf2b03787c59 2024-11-20T19:27:19,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742439_1615 (size=13357) 2024-11-20T19:27:19,735 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into 476fddb541c04b5c8f8ccf2b03787c59(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:19,735 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,735 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=13, startTime=1732130839643; duration=0sec 2024-11-20T19:27:19,735 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:19,735 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:19,736 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:19,737 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:19,737 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:19,737 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,737 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2c4471b6497142048ff6aa9deb2d9338, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fde096403d0e4be58740488ecb700e2e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/896d802a660e41d0bd20dd8fe63d07ad] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=37.0 K 2024-11-20T19:27:19,738 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c4471b6497142048ff6aa9deb2d9338, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=439, earliestPutTs=1732130838133 2024-11-20T19:27:19,738 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting fde096403d0e4be58740488ecb700e2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1732130838804 2024-11-20T19:27:19,739 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 896d802a660e41d0bd20dd8fe63d07ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732130839036 2024-11-20T19:27:19,740 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/7c906416081f40e299545bb33fd0d7ec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7c906416081f40e299545bb33fd0d7ec 2024-11-20T19:27:19,747 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 7c906416081f40e299545bb33fd0d7ec(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:19,747 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,747 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=13, startTime=1732130839644; duration=0sec 2024-11-20T19:27:19,747 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,747 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:19,756 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:19,757 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6c5cd6f3248e4edabcdf8624d0b339c1 is 50, key is test_row_0/C:col10/1732130839155/Put/seqid=0 2024-11-20T19:27:19,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742440_1616 (size=14741) 2024-11-20T19:27:19,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/c856325c50f24bc9af19bf1072d87609 2024-11-20T19:27:19,765 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:19,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,766 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:19,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:19,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6d1570475810446c9527e11bf620f21e is 50, key is test_row_0/B:col10/1732130839203/Put/seqid=0 2024-11-20T19:27:19,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742442_1618 (size=12301) 2024-11-20T19:27:19,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6d1570475810446c9527e11bf620f21e 2024-11-20T19:27:19,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742441_1617 (size=13357) 2024-11-20T19:27:19,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:19,802 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6c5cd6f3248e4edabcdf8624d0b339c1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6c5cd6f3248e4edabcdf8624d0b339c1 2024-11-20T19:27:19,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/18c448acec00404b85c5b602ed65a84e is 50, key is test_row_0/C:col10/1732130839203/Put/seqid=0 2024-11-20T19:27:19,820 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into 6c5cd6f3248e4edabcdf8624d0b339c1(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:19,820 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,820 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=13, startTime=1732130839644; duration=0sec 2024-11-20T19:27:19,820 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:19,820 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:19,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130899841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742443_1619 (size=12301) 2024-11-20T19:27:19,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/18c448acec00404b85c5b602ed65a84e 2024-11-20T19:27:19,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130899847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/c856325c50f24bc9af19bf1072d87609 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c856325c50f24bc9af19bf1072d87609 2024-11-20T19:27:19,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c856325c50f24bc9af19bf1072d87609, entries=200, sequenceid=491, filesize=14.4 K 2024-11-20T19:27:19,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/6d1570475810446c9527e11bf620f21e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d1570475810446c9527e11bf620f21e 2024-11-20T19:27:19,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d1570475810446c9527e11bf620f21e, entries=150, sequenceid=491, filesize=12.0 K 2024-11-20T19:27:19,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/18c448acec00404b85c5b602ed65a84e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/18c448acec00404b85c5b602ed65a84e 2024-11-20T19:27:19,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/18c448acec00404b85c5b602ed65a84e, entries=150, sequenceid=491, filesize=12.0 K 2024-11-20T19:27:19,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 
KB/144270 for 794552c8bce342231c204cc0e02fbebc in 211ms, sequenceid=491, compaction requested=false 2024-11-20T19:27:19,876 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:19,918 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-20T19:27:19,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:19,919 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:27:19,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:19,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:19,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:19,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:19,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1bc09f6e89ef4e5e8dcc2c926f348bec is 50, key is test_row_0/A:col10/1732130839838/Put/seqid=0 2024-11-20T19:27:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:19,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:19,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742444_1620 (size=12301) 2024-11-20T19:27:19,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130899961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130899974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130899975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,991 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130899977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:19,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:19,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130899983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130900077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130900088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130900092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130900092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130900097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130900285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130900295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130900299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130900299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130900305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,360 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=518 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1bc09f6e89ef4e5e8dcc2c926f348bec 2024-11-20T19:27:20,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/312a04e5614b4a2685bdf02a2dbbb424 is 50, key is test_row_0/B:col10/1732130839838/Put/seqid=0 2024-11-20T19:27:20,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742445_1621 (size=12301) 2024-11-20T19:27:20,385 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=518 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/312a04e5614b4a2685bdf02a2dbbb424 2024-11-20T19:27:20,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/fd1c81c87153438e98fd463f484cb856 is 50, key is test_row_0/C:col10/1732130839838/Put/seqid=0 2024-11-20T19:27:20,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742446_1622 (size=12301) 2024-11-20T19:27:20,428 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=518 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/fd1c81c87153438e98fd463f484cb856 2024-11-20T19:27:20,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/1bc09f6e89ef4e5e8dcc2c926f348bec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1bc09f6e89ef4e5e8dcc2c926f348bec 2024-11-20T19:27:20,438 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1bc09f6e89ef4e5e8dcc2c926f348bec, entries=150, sequenceid=518, filesize=12.0 K 2024-11-20T19:27:20,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/312a04e5614b4a2685bdf02a2dbbb424 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/312a04e5614b4a2685bdf02a2dbbb424 2024-11-20T19:27:20,442 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/312a04e5614b4a2685bdf02a2dbbb424, entries=150, sequenceid=518, filesize=12.0 K 2024-11-20T19:27:20,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/fd1c81c87153438e98fd463f484cb856 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fd1c81c87153438e98fd463f484cb856 2024-11-20T19:27:20,447 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fd1c81c87153438e98fd463f484cb856, entries=150, sequenceid=518, filesize=12.0 K 2024-11-20T19:27:20,447 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 794552c8bce342231c204cc0e02fbebc in 529ms, sequenceid=518, compaction requested=true 2024-11-20T19:27:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
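The repeated RegionTooBusyException entries above show client writes being rejected while the region's memstore is over its 512.0 K blocking limit, until the flush for pid=143 completes and drains it. The stock HBase client already retries these failures internally; the sketch below only illustrates an application-level backoff once those retries are exhausted. It reuses the table, row, family and qualifier names that appear in this log, while the cell value and retry parameters are illustrative assumptions, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // rejected with RegionTooBusyException while the memstore is over its limit
          break;          // write accepted
        } catch (RegionTooBusyException busy) {
          // Give the MemStoreFlusher time to flush and drop the memstore below the blocking limit.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}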
2024-11-20T19:27:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-20T19:27:20,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-20T19:27:20,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-20T19:27:20,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7510 sec 2024-11-20T19:27:20,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.7610 sec 2024-11-20T19:27:20,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:20,596 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:20,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:20,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:20,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:20,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,601 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c is 50, key is test_row_0/A:col10/1732130840595/Put/seqid=0 2024-11-20T19:27:20,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742447_1623 (size=14741) 2024-11-20T19:27:20,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c 2024-11-20T19:27:20,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/ddf76dc0bf7f40bf910857c5aaf78499 is 50, key is test_row_0/B:col10/1732130840595/Put/seqid=0 2024-11-20T19:27:20,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to 
blk_1073742448_1624 (size=12301) 2024-11-20T19:27:20,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/ddf76dc0bf7f40bf910857c5aaf78499 2024-11-20T19:27:20,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/ec36feb49ca4493f84d004464448a243 is 50, key is test_row_0/C:col10/1732130840595/Put/seqid=0 2024-11-20T19:27:20,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130900642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130900643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130900647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130900648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742449_1625 (size=12301) 2024-11-20T19:27:20,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=531 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/ec36feb49ca4493f84d004464448a243 2024-11-20T19:27:20,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130900656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c 2024-11-20T19:27:20,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c, entries=200, sequenceid=531, filesize=14.4 K 2024-11-20T19:27:20,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/ddf76dc0bf7f40bf910857c5aaf78499 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ddf76dc0bf7f40bf910857c5aaf78499 2024-11-20T19:27:20,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ddf76dc0bf7f40bf910857c5aaf78499, entries=150, sequenceid=531, filesize=12.0 K 2024-11-20T19:27:20,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/ec36feb49ca4493f84d004464448a243 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/ec36feb49ca4493f84d004464448a243 2024-11-20T19:27:20,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/ec36feb49ca4493f84d004464448a243, entries=150, sequenceid=531, filesize=12.0 K 2024-11-20T19:27:20,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 
KB/137400 for 794552c8bce342231c204cc0e02fbebc in 91ms, sequenceid=531, compaction requested=true 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:20,688 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:20,688 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:20,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:20,693 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:20,693 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/B is initiating minor compaction (all files) 2024-11-20T19:27:20,693 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/B in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
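The long-compactions thread above has just selected 4 store-B files totalling 50260 bytes (reported as 49.1 K) via the exploring compaction policy; the individual file sizes appear in the Compactor(224) entries that follow. The snippet below is a simplified sketch of the size-ratio test that underlies that kind of selection, assuming the default hbase.hstore.compaction.ratio of 1.2 and approximate byte sizes for the 13.0 K and 12.0 K files; the real ExploringCompactionPolicy additionally enumerates contiguous file runs and scores them, so this is illustrative only.

// Simplified sketch of the size-ratio test behind minor compaction selection.
// File sizes approximate the four store-B files from this log (13.0 K + 12.0 K + 12.0 K + 12.0 K ~= 49.1 K).
public class CompactionRatioSketch {
  public static void main(String[] args) {
    long[] fileSizes = {13_312, 12_301, 12_301, 12_301}; // bytes, roughly as reported above
    double ratio = 1.2; // assumed default of hbase.hstore.compaction.ratio

    long total = 0;
    for (long s : fileSizes) {
      total += s;
    }

    boolean allInRatio = true;
    for (long s : fileSizes) {
      // A file stays in the selection only if it is not too large relative to the rest of the selection.
      if (s > ratio * (total - s)) {
        allInRatio = false;
      }
    }
    System.out.println("total=" + total + " bytes, every file within ratio: " + allInRatio);
  }
}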
2024-11-20T19:27:20,693 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7c906416081f40e299545bb33fd0d7ec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d1570475810446c9527e11bf620f21e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/312a04e5614b4a2685bdf02a2dbbb424, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ddf76dc0bf7f40bf910857c5aaf78499] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=49.1 K 2024-11-20T19:27:20,693 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55140 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:20,693 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/A is initiating minor compaction (all files) 2024-11-20T19:27:20,693 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/A in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:20,693 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/476fddb541c04b5c8f8ccf2b03787c59, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c856325c50f24bc9af19bf1072d87609, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1bc09f6e89ef4e5e8dcc2c926f348bec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=53.8 K 2024-11-20T19:27:20,694 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 476fddb541c04b5c8f8ccf2b03787c59, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732130839036 2024-11-20T19:27:20,694 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c906416081f40e299545bb33fd0d7ec, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732130839036 2024-11-20T19:27:20,694 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c856325c50f24bc9af19bf1072d87609, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=491, 
earliestPutTs=1732130839193 2024-11-20T19:27:20,694 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d1570475810446c9527e11bf620f21e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1732130839196 2024-11-20T19:27:20,694 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bc09f6e89ef4e5e8dcc2c926f348bec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1732130839838 2024-11-20T19:27:20,694 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 312a04e5614b4a2685bdf02a2dbbb424, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1732130839838 2024-11-20T19:27:20,695 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ddf76dc0bf7f40bf910857c5aaf78499, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=531, earliestPutTs=1732130839959 2024-11-20T19:27:20,695 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d9a1c9ecb264e4fb6d2c9d2a990ad3c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=531, earliestPutTs=1732130839959 2024-11-20T19:27:20,729 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#A#compaction#537 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:20,730 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/e420459afe4a4938a98756e44a12bee5 is 50, key is test_row_0/A:col10/1732130840595/Put/seqid=0 2024-11-20T19:27:20,733 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#B#compaction#538 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:20,733 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/99cdddd7f6f34c7793236f49559e4a36 is 50, key is test_row_0/B:col10/1732130840595/Put/seqid=0 2024-11-20T19:27:20,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742451_1627 (size=13493) 2024-11-20T19:27:20,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:27:20,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:20,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,764 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:20,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:20,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:20,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:20,773 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/e420459afe4a4938a98756e44a12bee5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/e420459afe4a4938a98756e44a12bee5 2024-11-20T19:27:20,777 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/A of 794552c8bce342231c204cc0e02fbebc into e420459afe4a4938a98756e44a12bee5(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:20,777 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:20,777 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/A, priority=12, startTime=1732130840688; duration=0sec 2024-11-20T19:27:20,777 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:20,777 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:20,778 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:20,781 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50260 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:20,781 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): 794552c8bce342231c204cc0e02fbebc/C is initiating minor compaction (all files) 2024-11-20T19:27:20,781 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 794552c8bce342231c204cc0e02fbebc/C in TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:20,782 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6c5cd6f3248e4edabcdf8624d0b339c1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/18c448acec00404b85c5b602ed65a84e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fd1c81c87153438e98fd463f484cb856, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/ec36feb49ca4493f84d004464448a243] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp, totalSize=49.1 K 2024-11-20T19:27:20,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742450_1626 (size=13493) 2024-11-20T19:27:20,790 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c5cd6f3248e4edabcdf8624d0b339c1, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=479, earliestPutTs=1732130839036 2024-11-20T19:27:20,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/f0c81454babb49848132f3b220f3c208 is 50, key is test_row_0/A:col10/1732130840645/Put/seqid=0 2024-11-20T19:27:20,793 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18c448acec00404b85c5b602ed65a84e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1732130839196 2024-11-20T19:27:20,793 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd1c81c87153438e98fd463f484cb856, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=518, earliestPutTs=1732130839838 2024-11-20T19:27:20,794 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec36feb49ca4493f84d004464448a243, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=531, earliestPutTs=1732130839959 2024-11-20T19:27:20,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-20T19:27:20,799 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-20T19:27:20,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:20,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-20T19:27:20,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T19:27:20,801 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:20,802 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:20,802 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:20,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742452_1628 (size=14741) 2024-11-20T19:27:20,817 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 794552c8bce342231c204cc0e02fbebc#C#compaction#540 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:20,817 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6cde54406155436d81bd8ecf28a87d80 is 50, key is test_row_0/C:col10/1732130840595/Put/seqid=0 2024-11-20T19:27:20,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742453_1629 (size=13493) 2024-11-20T19:27:20,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130900781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130900844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130900847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130900848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130900855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T19:27:20,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T19:27:20,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130900948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:20,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:20,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:20,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:20,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:20,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:20,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130900966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130900966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130900966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:20,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:20,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130900969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T19:27:21,108 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,109 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T19:27:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,109 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130901158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130901175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130901175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130901176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130901177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,194 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/99cdddd7f6f34c7793236f49559e4a36 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/99cdddd7f6f34c7793236f49559e4a36 2024-11-20T19:27:21,200 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/B of 794552c8bce342231c204cc0e02fbebc into 99cdddd7f6f34c7793236f49559e4a36(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:21,200 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:21,200 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/B, priority=12, startTime=1732130840688; duration=0sec 2024-11-20T19:27:21,200 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:21,200 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:21,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/f0c81454babb49848132f3b220f3c208 2024-11-20T19:27:21,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/68e581547ace45ebae09cfaf42f77aff is 50, key is test_row_0/B:col10/1732130840645/Put/seqid=0 2024-11-20T19:27:21,233 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/6cde54406155436d81bd8ecf28a87d80 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6cde54406155436d81bd8ecf28a87d80 2024-11-20T19:27:21,237 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 794552c8bce342231c204cc0e02fbebc/C of 794552c8bce342231c204cc0e02fbebc into 6cde54406155436d81bd8ecf28a87d80(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:21,237 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:21,237 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc., storeName=794552c8bce342231c204cc0e02fbebc/C, priority=12, startTime=1732130840688; duration=0sec 2024-11-20T19:27:21,237 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:21,237 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:21,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742454_1630 (size=12301) 2024-11-20T19:27:21,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T19:27:21,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,262 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:21,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T19:27:21,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T19:27:21,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:21,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130901464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130901482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130901484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130901484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130901485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,567 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,567 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T19:27:21,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:21,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:21,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:21,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/68e581547ace45ebae09cfaf42f77aff 2024-11-20T19:27:21,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d29487f945bd433fb811068a18fcf618 is 50, key is test_row_0/C:col10/1732130840645/Put/seqid=0 2024-11-20T19:27:21,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742455_1631 (size=12301) 2024-11-20T19:27:21,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=555 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d29487f945bd433fb811068a18fcf618 2024-11-20T19:27:21,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/f0c81454babb49848132f3b220f3c208 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/f0c81454babb49848132f3b220f3c208 2024-11-20T19:27:21,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/f0c81454babb49848132f3b220f3c208, entries=200, sequenceid=555, filesize=14.4 K 2024-11-20T19:27:21,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/68e581547ace45ebae09cfaf42f77aff as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/68e581547ace45ebae09cfaf42f77aff 2024-11-20T19:27:21,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/68e581547ace45ebae09cfaf42f77aff, entries=150, sequenceid=555, filesize=12.0 K 2024-11-20T19:27:21,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/d29487f945bd433fb811068a18fcf618 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d29487f945bd433fb811068a18fcf618 2024-11-20T19:27:21,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d29487f945bd433fb811068a18fcf618, entries=150, sequenceid=555, filesize=12.0 K 2024-11-20T19:27:21,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 794552c8bce342231c204cc0e02fbebc in 941ms, sequenceid=555, compaction requested=false 2024-11-20T19:27:21,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:21,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:21,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-20T19:27:21,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:21,720 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:21,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:21,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:21,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:21,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:21,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:21,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:21,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/dec3fa362135486c8775f62ef14744eb is 50, key is test_row_0/A:col10/1732130840843/Put/seqid=0 2024-11-20T19:27:21,752 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742456_1632 (size=12301) 2024-11-20T19:27:21,753 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/dec3fa362135486c8775f62ef14744eb 2024-11-20T19:27:21,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a8e85be6e14e49f7b7ed335baf54a323 is 50, key is test_row_0/B:col10/1732130840843/Put/seqid=0 2024-11-20T19:27:21,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742457_1633 (size=12301) 2024-11-20T19:27:21,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T19:27:21,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. as already flushing 2024-11-20T19:27:21,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:22,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130902019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130902020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130902025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130902031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130902032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130902132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130902133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130902134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130902141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130902142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,209 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a8e85be6e14e49f7b7ed335baf54a323 2024-11-20T19:27:22,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/63943288a77a48598c7bb73f0a9d3ec8 is 50, key is test_row_0/C:col10/1732130840843/Put/seqid=0 2024-11-20T19:27:22,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742458_1634 (size=12301) 2024-11-20T19:27:22,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130902339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130902340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130902345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130902349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130902350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,637 DEBUG [Thread-2270 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x184771cf to 127.0.0.1:50476 2024-11-20T19:27:22,637 DEBUG [Thread-2270 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:22,640 DEBUG [Thread-2264 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:50476 2024-11-20T19:27:22,640 DEBUG [Thread-2264 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:22,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47234 deadline: 1732130902640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,641 DEBUG [Thread-2268 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:50476 2024-11-20T19:27:22,642 DEBUG [Thread-2268 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:22,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47294 deadline: 1732130902643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,644 DEBUG [Thread-2266 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:50476 2024-11-20T19:27:22,644 DEBUG [Thread-2266 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:22,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47230 deadline: 1732130902649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,650 DEBUG [Thread-2272 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x076f0408 to 127.0.0.1:50476 2024-11-20T19:27:22,650 DEBUG [Thread-2272 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:22,652 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=571 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/63943288a77a48598c7bb73f0a9d3ec8 2024-11-20T19:27:22,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47250 deadline: 1732130902655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/dec3fa362135486c8775f62ef14744eb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/dec3fa362135486c8775f62ef14744eb 2024-11-20T19:27:22,659 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/dec3fa362135486c8775f62ef14744eb, entries=150, sequenceid=571, filesize=12.0 K 2024-11-20T19:27:22,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/a8e85be6e14e49f7b7ed335baf54a323 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a8e85be6e14e49f7b7ed335baf54a323 2024-11-20T19:27:22,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:22,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47326 deadline: 1732130902661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:22,663 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a8e85be6e14e49f7b7ed335baf54a323, entries=150, sequenceid=571, filesize=12.0 K 2024-11-20T19:27:22,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/63943288a77a48598c7bb73f0a9d3ec8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/63943288a77a48598c7bb73f0a9d3ec8 2024-11-20T19:27:22,666 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/63943288a77a48598c7bb73f0a9d3ec8, entries=150, sequenceid=571, filesize=12.0 K 2024-11-20T19:27:22,667 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 794552c8bce342231c204cc0e02fbebc in 947ms, sequenceid=571, compaction requested=true 2024-11-20T19:27:22,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:22,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:22,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-20T19:27:22,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-20T19:27:22,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-20T19:27:22,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8660 sec 2024-11-20T19:27:22,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.8690 sec 2024-11-20T19:27:22,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-20T19:27:22,905 INFO [Thread-2263 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-20T19:27:23,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:23,146 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:23,146 DEBUG [Thread-2259 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b0c2472 to 127.0.0.1:50476 2024-11-20T19:27:23,146 DEBUG [Thread-2259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:23,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:23,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:23,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:23,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:23,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:23,146 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:23,149 DEBUG [Thread-2257 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7043f683 to 127.0.0.1:50476 2024-11-20T19:27:23,149 DEBUG [Thread-2257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:23,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/ad12ab8f07834972b47930e91d4fdb01 is 50, key is test_row_0/A:col10/1732130842023/Put/seqid=0 2024-11-20T19:27:23,155 DEBUG [Thread-2253 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7177efc9 to 127.0.0.1:50476 2024-11-20T19:27:23,155 DEBUG [Thread-2253 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-20T19:27:23,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742459_1635 (size=12301) 2024-11-20T19:27:23,164 DEBUG [Thread-2261 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34b30c39 to 127.0.0.1:50476 2024-11-20T19:27:23,164 DEBUG [Thread-2261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:23,167 DEBUG [Thread-2255 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61d38088 to 127.0.0.1:50476 2024-11-20T19:27:23,167 DEBUG [Thread-2255 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 139 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1904 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5712 rows 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1900 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5700 rows 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1905 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5715 rows 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1904 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5712 rows 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1914 2024-11-20T19:27:23,168 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5742 rows 2024-11-20T19:27:23,168 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:27:23,168 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x695c2253 to 127.0.0.1:50476 2024-11-20T19:27:23,168 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:23,171 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:27:23,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:27:23,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:23,175 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130843174"}]},"ts":"1732130843174"} 2024-11-20T19:27:23,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:23,176 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:27:23,200 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:27:23,200 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:23,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, UNASSIGN}] 2024-11-20T19:27:23,202 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, UNASSIGN 2024-11-20T19:27:23,203 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=794552c8bce342231c204cc0e02fbebc, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:23,203 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:23,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; CloseRegionProcedure 794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:23,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:23,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:23,355 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(124): Close 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:23,355 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:23,355 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1681): Closing 794552c8bce342231c204cc0e02fbebc, disabling compactions & flushes 2024-11-20T19:27:23,355 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:23,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:23,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=596 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/ad12ab8f07834972b47930e91d4fdb01 2024-11-20T19:27:23,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/1f140f37320e453baf772b724f844f58 is 50, key is test_row_0/B:col10/1732130842023/Put/seqid=0 2024-11-20T19:27:23,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742460_1636 (size=12301) 2024-11-20T19:27:23,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:23,997 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=596 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/1f140f37320e453baf772b724f844f58 2024-11-20T19:27:24,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/654fc028b34c402db91b546b69ec03ac is 50, key is test_row_0/C:col10/1732130842023/Put/seqid=0 2024-11-20T19:27:24,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742461_1637 (size=12301) 2024-11-20T19:27:24,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:24,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=596 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/654fc028b34c402db91b546b69ec03ac 2024-11-20T19:27:24,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/ad12ab8f07834972b47930e91d4fdb01 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ad12ab8f07834972b47930e91d4fdb01 2024-11-20T19:27:24,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ad12ab8f07834972b47930e91d4fdb01, entries=150, sequenceid=596, filesize=12.0 K 2024-11-20T19:27:24,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/1f140f37320e453baf772b724f844f58 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1f140f37320e453baf772b724f844f58 2024-11-20T19:27:24,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1f140f37320e453baf772b724f844f58, entries=150, sequenceid=596, filesize=12.0 K 2024-11-20T19:27:24,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/654fc028b34c402db91b546b69ec03ac as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/654fc028b34c402db91b546b69ec03ac 2024-11-20T19:27:24,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/654fc028b34c402db91b546b69ec03ac, entries=150, sequenceid=596, filesize=12.0 K 2024-11-20T19:27:24,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=26.84 KB/27480 for 794552c8bce342231c204cc0e02fbebc in 1272ms, sequenceid=596, compaction requested=true 2024-11-20T19:27:24,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:24,418 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:24,418 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. because compaction request was cancelled 2024-11-20T19:27:24,418 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:24,418 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:A 2024-11-20T19:27:24,418 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. after waiting 0 ms 2024-11-20T19:27:24,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:A, priority=-2147483648, current under compaction store size is 0 2024-11-20T19:27:24,418 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 
2024-11-20T19:27:24,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:24,418 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. because compaction request was cancelled 2024-11-20T19:27:24,418 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:B 2024-11-20T19:27:24,418 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(2837): Flushing 794552c8bce342231c204cc0e02fbebc 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T19:27:24,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:B, priority=-2147483648, current under compaction store size is 0 2024-11-20T19:27:24,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:24,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. because compaction request was cancelled 2024-11-20T19:27:24,419 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 794552c8bce342231c204cc0e02fbebc:C 2024-11-20T19:27:24,419 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=A 2024-11-20T19:27:24,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 794552c8bce342231c204cc0e02fbebc:C, priority=-2147483648, current under compaction store size is 0 2024-11-20T19:27:24,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:24,419 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:24,419 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=B 2024-11-20T19:27:24,419 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:24,419 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 794552c8bce342231c204cc0e02fbebc, store=C 2024-11-20T19:27:24,419 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:24,422 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 
{event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/220d2bcc1c50442cac0371758421f783 is 50, key is test_row_0/A:col10/1732130843165/Put/seqid=0 2024-11-20T19:27:24,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742462_1638 (size=9857) 2024-11-20T19:27:24,826 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/220d2bcc1c50442cac0371758421f783 2024-11-20T19:27:24,831 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/739c3e6bdefb4d9daf45b775e9d02271 is 50, key is test_row_0/B:col10/1732130843165/Put/seqid=0 2024-11-20T19:27:24,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742463_1639 (size=9857) 2024-11-20T19:27:25,235 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/739c3e6bdefb4d9daf45b775e9d02271 2024-11-20T19:27:25,239 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/cfb7750847ac43fca0e663d2bf07ffe0 is 50, key is test_row_0/C:col10/1732130843165/Put/seqid=0 2024-11-20T19:27:25,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742464_1640 (size=9857) 2024-11-20T19:27:25,242 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/cfb7750847ac43fca0e663d2bf07ffe0 2024-11-20T19:27:25,245 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/A/220d2bcc1c50442cac0371758421f783 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/220d2bcc1c50442cac0371758421f783 2024-11-20T19:27:25,248 INFO 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/220d2bcc1c50442cac0371758421f783, entries=100, sequenceid=603, filesize=9.6 K 2024-11-20T19:27:25,248 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/B/739c3e6bdefb4d9daf45b775e9d02271 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/739c3e6bdefb4d9daf45b775e9d02271 2024-11-20T19:27:25,258 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/739c3e6bdefb4d9daf45b775e9d02271, entries=100, sequenceid=603, filesize=9.6 K 2024-11-20T19:27:25,259 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/.tmp/C/cfb7750847ac43fca0e663d2bf07ffe0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/cfb7750847ac43fca0e663d2bf07ffe0 2024-11-20T19:27:25,261 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/cfb7750847ac43fca0e663d2bf07ffe0, entries=100, sequenceid=603, filesize=9.6 K 2024-11-20T19:27:25,262 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 794552c8bce342231c204cc0e02fbebc in 844ms, sequenceid=603, compaction requested=true 2024-11-20T19:27:25,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df366801d01f44acae6ad5b0c945e106, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df18074a3f9e4f85bf5d084caac04a7c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a8c3fa3504904917bbf8891729fc45d9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ee9e762c86964714ba5c476e0a91454d, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/01a257cfaae74dbabac90bdfbe4d3c43, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5357bf0f69ce4bb087aaf1c26d8880a5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/580504fe54ba43a7af3e0681f7989e46, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5fe710095d3e43658659f8f1fd3fd043, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/55d2dc7e77c74831b47790132f5b185e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1f37c8ae522445df98c7fa037ea7d169, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7c76f98d2a1f4416b75a868a02886309, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ebf316359dd4197b1893e1fd74c431f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8f2f1adaf5004771a4791df127b96aa2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4a36aeb844fd4aa68e62b7fb9a70ac3d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fd798b8893384baea82b19ff7a475fb8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a1a0bdf60c414b70939a330927ac16a5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fcf53cc61b5e444b96531d91f78f1f88, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/808a8c692a5e4839bdeeb75687666aa7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/60601ba4860345bb9bd497a297429ca7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/47ef50167c3c4d60adc4459bd6b25f55, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7275bbcac7514c94be952ad58d955938, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40b60e280c2247a3b0f6f5283b447d08, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/b2787bee6c874178b0cf237f09899236, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40579fa6e69d45848edeb38873ddee20, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8d3999da5f482ab608564b44d24bd9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ec784b9d890405e8717aed821028640, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c68d0c588f2340dc8850c4400078cf84, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a3b12fa5a1a549aa95ee60c548857505, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8fb030c10ab049dfa4cb62b1b8bb97e0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/79cefdf85bd34fc58880b88763452b1d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/9eeae91e830141f78b7001eab008a7ab, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/476fddb541c04b5c8f8ccf2b03787c59, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c856325c50f24bc9af19bf1072d87609, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1bc09f6e89ef4e5e8dcc2c926f348bec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c] to archive 2024-11-20T19:27:25,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:25,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df366801d01f44acae6ad5b0c945e106 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df366801d01f44acae6ad5b0c945e106 2024-11-20T19:27:25,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df18074a3f9e4f85bf5d084caac04a7c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/df18074a3f9e4f85bf5d084caac04a7c 2024-11-20T19:27:25,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a8c3fa3504904917bbf8891729fc45d9 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a8c3fa3504904917bbf8891729fc45d9 2024-11-20T19:27:25,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ee9e762c86964714ba5c476e0a91454d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ee9e762c86964714ba5c476e0a91454d 2024-11-20T19:27:25,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/01a257cfaae74dbabac90bdfbe4d3c43 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/01a257cfaae74dbabac90bdfbe4d3c43 2024-11-20T19:27:25,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5357bf0f69ce4bb087aaf1c26d8880a5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5357bf0f69ce4bb087aaf1c26d8880a5 2024-11-20T19:27:25,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/580504fe54ba43a7af3e0681f7989e46 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/580504fe54ba43a7af3e0681f7989e46 2024-11-20T19:27:25,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5fe710095d3e43658659f8f1fd3fd043 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/5fe710095d3e43658659f8f1fd3fd043 2024-11-20T19:27:25,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/55d2dc7e77c74831b47790132f5b185e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/55d2dc7e77c74831b47790132f5b185e 2024-11-20T19:27:25,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1f37c8ae522445df98c7fa037ea7d169 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1f37c8ae522445df98c7fa037ea7d169 2024-11-20T19:27:25,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7c76f98d2a1f4416b75a868a02886309 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7c76f98d2a1f4416b75a868a02886309 2024-11-20T19:27:25,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ebf316359dd4197b1893e1fd74c431f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ebf316359dd4197b1893e1fd74c431f 2024-11-20T19:27:25,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8f2f1adaf5004771a4791df127b96aa2 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8f2f1adaf5004771a4791df127b96aa2 2024-11-20T19:27:25,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4a36aeb844fd4aa68e62b7fb9a70ac3d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4a36aeb844fd4aa68e62b7fb9a70ac3d 2024-11-20T19:27:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:25,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fd798b8893384baea82b19ff7a475fb8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fd798b8893384baea82b19ff7a475fb8 2024-11-20T19:27:25,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a1a0bdf60c414b70939a330927ac16a5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a1a0bdf60c414b70939a330927ac16a5 2024-11-20T19:27:25,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fcf53cc61b5e444b96531d91f78f1f88 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/fcf53cc61b5e444b96531d91f78f1f88 2024-11-20T19:27:25,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/808a8c692a5e4839bdeeb75687666aa7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/808a8c692a5e4839bdeeb75687666aa7 2024-11-20T19:27:25,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/60601ba4860345bb9bd497a297429ca7 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/60601ba4860345bb9bd497a297429ca7 2024-11-20T19:27:25,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/47ef50167c3c4d60adc4459bd6b25f55 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/47ef50167c3c4d60adc4459bd6b25f55 2024-11-20T19:27:25,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7275bbcac7514c94be952ad58d955938 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/7275bbcac7514c94be952ad58d955938 2024-11-20T19:27:25,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40b60e280c2247a3b0f6f5283b447d08 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40b60e280c2247a3b0f6f5283b447d08 2024-11-20T19:27:25,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/b2787bee6c874178b0cf237f09899236 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/b2787bee6c874178b0cf237f09899236 2024-11-20T19:27:25,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40579fa6e69d45848edeb38873ddee20 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/40579fa6e69d45848edeb38873ddee20 2024-11-20T19:27:25,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8d3999da5f482ab608564b44d24bd9 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1e8d3999da5f482ab608564b44d24bd9 2024-11-20T19:27:25,289 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ec784b9d890405e8717aed821028640 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/4ec784b9d890405e8717aed821028640 2024-11-20T19:27:25,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c68d0c588f2340dc8850c4400078cf84 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c68d0c588f2340dc8850c4400078cf84 2024-11-20T19:27:25,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a3b12fa5a1a549aa95ee60c548857505 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/a3b12fa5a1a549aa95ee60c548857505 2024-11-20T19:27:25,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8fb030c10ab049dfa4cb62b1b8bb97e0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/8fb030c10ab049dfa4cb62b1b8bb97e0 2024-11-20T19:27:25,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/79cefdf85bd34fc58880b88763452b1d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/79cefdf85bd34fc58880b88763452b1d 2024-11-20T19:27:25,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/9eeae91e830141f78b7001eab008a7ab to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/9eeae91e830141f78b7001eab008a7ab 2024-11-20T19:27:25,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/476fddb541c04b5c8f8ccf2b03787c59 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/476fddb541c04b5c8f8ccf2b03787c59 2024-11-20T19:27:25,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c856325c50f24bc9af19bf1072d87609 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/c856325c50f24bc9af19bf1072d87609 2024-11-20T19:27:25,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1bc09f6e89ef4e5e8dcc2c926f348bec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/1bc09f6e89ef4e5e8dcc2c926f348bec 2024-11-20T19:27:25,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/2d9a1c9ecb264e4fb6d2c9d2a990ad3c 2024-11-20T19:27:25,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/81e8c419af3945f2b59e631157a79cb8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/2a16f158a4c343618c5c6244b173e9ac, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/239d9b9ea1d840a1b8e448f909da68b4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4601f80fc5fd4c128b6db6f2e0d0ac5e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/3d0e45935f1b40839f4bd95ccb2c5334, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/267eee7b1e2b4153a37d9a23227be2e0, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/aa373d64900f4ed781348f060ec0848c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/76c1acbf43b745309c33d1967ac3a97a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/57adee12f19e472f8d33f3da4694654f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6f65b0435ec446c49a04b7483b548465, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/b8b9a049924a4ee98cae88644ba0f5fe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/f9a9a92954214558b73eba8bd271be1b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/dbe493a6f1a94ceabb0bd31fc2845d80, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a197c3475072428ca69b5f23a1a16a3e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/16d1493a542a4b689f3f29cdc469328b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5e88db0400dc4e779aad6e9395a9cc0d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/0fa96fdb636448f591abce2c59dbbeaa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7fbaebae81014543838c4947bd7893c0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4125b0ff3f494ef399f81b135f528540, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a5a041e90fd6435887d1591b6a41ff98, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5f30c5fa484646a58b051a79d377998d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/38f5c0133bee4ac9b54b5d82e26d2b87, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/30c9fbf2e48b4c5092c498908151216e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ea461df827d249d28ce0550fd55dfa97, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/65508f24de784e9fbc090eebdd6c7c92, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/90e60deb29834d689ed050becd1e70bd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d5fa825edf34db9b3877ed4ed81386a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/87ac6b64e2e342369d9cec25efd384ad, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/93bfe6fea12f47948dd7f14700dda061, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7c906416081f40e299545bb33fd0d7ec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/27d99720aa71462d81b3e2ac366d9c17, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d1570475810446c9527e11bf620f21e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/312a04e5614b4a2685bdf02a2dbbb424, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ddf76dc0bf7f40bf910857c5aaf78499] to archive 2024-11-20T19:27:25,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:25,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/81e8c419af3945f2b59e631157a79cb8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/81e8c419af3945f2b59e631157a79cb8 2024-11-20T19:27:25,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/2a16f158a4c343618c5c6244b173e9ac to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/2a16f158a4c343618c5c6244b173e9ac 2024-11-20T19:27:25,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/239d9b9ea1d840a1b8e448f909da68b4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/239d9b9ea1d840a1b8e448f909da68b4 2024-11-20T19:27:25,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4601f80fc5fd4c128b6db6f2e0d0ac5e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4601f80fc5fd4c128b6db6f2e0d0ac5e 2024-11-20T19:27:25,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/cfe9ffbb9e6f4fb3abe0afbbf1b47faa 2024-11-20T19:27:25,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/3d0e45935f1b40839f4bd95ccb2c5334 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/3d0e45935f1b40839f4bd95ccb2c5334 2024-11-20T19:27:25,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/267eee7b1e2b4153a37d9a23227be2e0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/267eee7b1e2b4153a37d9a23227be2e0 2024-11-20T19:27:25,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/aa373d64900f4ed781348f060ec0848c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/aa373d64900f4ed781348f060ec0848c 2024-11-20T19:27:25,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/76c1acbf43b745309c33d1967ac3a97a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/76c1acbf43b745309c33d1967ac3a97a 2024-11-20T19:27:25,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/57adee12f19e472f8d33f3da4694654f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/57adee12f19e472f8d33f3da4694654f 2024-11-20T19:27:25,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6f65b0435ec446c49a04b7483b548465 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6f65b0435ec446c49a04b7483b548465 2024-11-20T19:27:25,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/b8b9a049924a4ee98cae88644ba0f5fe to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/b8b9a049924a4ee98cae88644ba0f5fe 2024-11-20T19:27:25,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/f9a9a92954214558b73eba8bd271be1b to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/f9a9a92954214558b73eba8bd271be1b 2024-11-20T19:27:25,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/dbe493a6f1a94ceabb0bd31fc2845d80 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/dbe493a6f1a94ceabb0bd31fc2845d80 2024-11-20T19:27:25,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a197c3475072428ca69b5f23a1a16a3e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a197c3475072428ca69b5f23a1a16a3e 2024-11-20T19:27:25,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/16d1493a542a4b689f3f29cdc469328b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/16d1493a542a4b689f3f29cdc469328b 2024-11-20T19:27:25,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5e88db0400dc4e779aad6e9395a9cc0d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5e88db0400dc4e779aad6e9395a9cc0d 2024-11-20T19:27:25,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/0fa96fdb636448f591abce2c59dbbeaa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/0fa96fdb636448f591abce2c59dbbeaa 2024-11-20T19:27:25,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7fbaebae81014543838c4947bd7893c0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7fbaebae81014543838c4947bd7893c0 2024-11-20T19:27:25,321 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4125b0ff3f494ef399f81b135f528540 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/4125b0ff3f494ef399f81b135f528540 2024-11-20T19:27:25,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a5a041e90fd6435887d1591b6a41ff98 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a5a041e90fd6435887d1591b6a41ff98 2024-11-20T19:27:25,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5f30c5fa484646a58b051a79d377998d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/5f30c5fa484646a58b051a79d377998d 2024-11-20T19:27:25,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/38f5c0133bee4ac9b54b5d82e26d2b87 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/38f5c0133bee4ac9b54b5d82e26d2b87 2024-11-20T19:27:25,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/30c9fbf2e48b4c5092c498908151216e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/30c9fbf2e48b4c5092c498908151216e 2024-11-20T19:27:25,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ea461df827d249d28ce0550fd55dfa97 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ea461df827d249d28ce0550fd55dfa97 2024-11-20T19:27:25,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/65508f24de784e9fbc090eebdd6c7c92 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/65508f24de784e9fbc090eebdd6c7c92 2024-11-20T19:27:25,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/90e60deb29834d689ed050becd1e70bd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/90e60deb29834d689ed050becd1e70bd 2024-11-20T19:27:25,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d5fa825edf34db9b3877ed4ed81386a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d5fa825edf34db9b3877ed4ed81386a 2024-11-20T19:27:25,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/87ac6b64e2e342369d9cec25efd384ad to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/87ac6b64e2e342369d9cec25efd384ad 2024-11-20T19:27:25,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/93bfe6fea12f47948dd7f14700dda061 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/93bfe6fea12f47948dd7f14700dda061 2024-11-20T19:27:25,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7c906416081f40e299545bb33fd0d7ec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/7c906416081f40e299545bb33fd0d7ec 2024-11-20T19:27:25,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/27d99720aa71462d81b3e2ac366d9c17 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/27d99720aa71462d81b3e2ac366d9c17 2024-11-20T19:27:25,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d1570475810446c9527e11bf620f21e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/6d1570475810446c9527e11bf620f21e 2024-11-20T19:27:25,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/312a04e5614b4a2685bdf02a2dbbb424 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/312a04e5614b4a2685bdf02a2dbbb424 2024-11-20T19:27:25,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ddf76dc0bf7f40bf910857c5aaf78499 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/ddf76dc0bf7f40bf910857c5aaf78499 2024-11-20T19:27:25,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c1fc990b346245c191e052e0faa7def5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2025cb93acf84daa93ab30dd7acfff30, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e8c8ee28ba48450a8c91ce3e2015bda0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/5e7ea7b3a76e4f079187cdd12bcd07ec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6353b1a270324fa0bdd7a5e63b949b49, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/64b7cec41848416e998f9be5e5c9c875, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6638b4ffc7bd470f81f4793edb989972, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/438a5135f04e43818b405c51ceec835a, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c09f9358568544dca65fe23a7fe0cc49, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/db5be57a0b6047109bb423bc8e01ff89, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e07990235b8a49db89d68dd81bfd5335, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/98a1e68d561544699636ce39017d8fa4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e5bccfd9b1be4cfc803ab8acda2caa5b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/23cd5a1809ac4c559055ac746145d3ce, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/78c38127061e450fb298b3ecf9aab7db, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/f47dfcdb4163416d944de27b7a870757, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/0d3f7eea94f44e138884b62a3c58ed73, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/552083c9c8394f4cb3f1807bafa85463, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/14425ecb2e4543128a01cf1e52659ab1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8194f8d2ad63498abbad355b71cc14e6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/3df8d501ef3d4c6ea78032c8184d41ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d86b1a2aa5cb4a388e79d60e86e249b3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b33df585a7644c33b4434fe494243676, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/aa340115c5444ff183c9a0a8e198883c, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/af9baaba0ec348878f6be4e93900c6c2, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/58732d1f03d5460992bd5a25dcae8797, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/08f32b0081854acd9dda47a8113aa0f8, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2c4471b6497142048ff6aa9deb2d9338, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b1b29ba894924fe19e2c66f87f135481, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fde096403d0e4be58740488ecb700e2e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6c5cd6f3248e4edabcdf8624d0b339c1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/896d802a660e41d0bd20dd8fe63d07ad, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/18c448acec00404b85c5b602ed65a84e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fd1c81c87153438e98fd463f484cb856, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/ec36feb49ca4493f84d004464448a243] to archive 2024-11-20T19:27:25,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:27:25,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c1fc990b346245c191e052e0faa7def5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c1fc990b346245c191e052e0faa7def5 2024-11-20T19:27:25,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2025cb93acf84daa93ab30dd7acfff30 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2025cb93acf84daa93ab30dd7acfff30 2024-11-20T19:27:25,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e8c8ee28ba48450a8c91ce3e2015bda0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e8c8ee28ba48450a8c91ce3e2015bda0 2024-11-20T19:27:25,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/5e7ea7b3a76e4f079187cdd12bcd07ec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/5e7ea7b3a76e4f079187cdd12bcd07ec 2024-11-20T19:27:25,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6353b1a270324fa0bdd7a5e63b949b49 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6353b1a270324fa0bdd7a5e63b949b49 2024-11-20T19:27:25,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/64b7cec41848416e998f9be5e5c9c875 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/64b7cec41848416e998f9be5e5c9c875 2024-11-20T19:27:25,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6638b4ffc7bd470f81f4793edb989972 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6638b4ffc7bd470f81f4793edb989972 2024-11-20T19:27:25,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/438a5135f04e43818b405c51ceec835a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/438a5135f04e43818b405c51ceec835a 2024-11-20T19:27:25,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c09f9358568544dca65fe23a7fe0cc49 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c09f9358568544dca65fe23a7fe0cc49 2024-11-20T19:27:25,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/db5be57a0b6047109bb423bc8e01ff89 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/db5be57a0b6047109bb423bc8e01ff89 2024-11-20T19:27:25,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e07990235b8a49db89d68dd81bfd5335 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e07990235b8a49db89d68dd81bfd5335 2024-11-20T19:27:25,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/98a1e68d561544699636ce39017d8fa4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/98a1e68d561544699636ce39017d8fa4 2024-11-20T19:27:25,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e5bccfd9b1be4cfc803ab8acda2caa5b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/e5bccfd9b1be4cfc803ab8acda2caa5b 2024-11-20T19:27:25,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/23cd5a1809ac4c559055ac746145d3ce to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/23cd5a1809ac4c559055ac746145d3ce 2024-11-20T19:27:25,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/78c38127061e450fb298b3ecf9aab7db to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/78c38127061e450fb298b3ecf9aab7db 2024-11-20T19:27:25,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/f47dfcdb4163416d944de27b7a870757 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/f47dfcdb4163416d944de27b7a870757 2024-11-20T19:27:25,349 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/0d3f7eea94f44e138884b62a3c58ed73 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/0d3f7eea94f44e138884b62a3c58ed73 2024-11-20T19:27:25,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/552083c9c8394f4cb3f1807bafa85463 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/552083c9c8394f4cb3f1807bafa85463 2024-11-20T19:27:25,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/14425ecb2e4543128a01cf1e52659ab1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/14425ecb2e4543128a01cf1e52659ab1 2024-11-20T19:27:25,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8194f8d2ad63498abbad355b71cc14e6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/8194f8d2ad63498abbad355b71cc14e6 2024-11-20T19:27:25,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/3df8d501ef3d4c6ea78032c8184d41ee to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/3df8d501ef3d4c6ea78032c8184d41ee 2024-11-20T19:27:25,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d86b1a2aa5cb4a388e79d60e86e249b3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d86b1a2aa5cb4a388e79d60e86e249b3 2024-11-20T19:27:25,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b33df585a7644c33b4434fe494243676 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b33df585a7644c33b4434fe494243676 2024-11-20T19:27:25,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/aa340115c5444ff183c9a0a8e198883c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/aa340115c5444ff183c9a0a8e198883c 2024-11-20T19:27:25,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/af9baaba0ec348878f6be4e93900c6c2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/af9baaba0ec348878f6be4e93900c6c2 2024-11-20T19:27:25,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/58732d1f03d5460992bd5a25dcae8797 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/58732d1f03d5460992bd5a25dcae8797 2024-11-20T19:27:25,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/08f32b0081854acd9dda47a8113aa0f8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/08f32b0081854acd9dda47a8113aa0f8 2024-11-20T19:27:25,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2c4471b6497142048ff6aa9deb2d9338 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/2c4471b6497142048ff6aa9deb2d9338 2024-11-20T19:27:25,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b1b29ba894924fe19e2c66f87f135481 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/b1b29ba894924fe19e2c66f87f135481 2024-11-20T19:27:25,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fde096403d0e4be58740488ecb700e2e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fde096403d0e4be58740488ecb700e2e 2024-11-20T19:27:25,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6c5cd6f3248e4edabcdf8624d0b339c1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6c5cd6f3248e4edabcdf8624d0b339c1 2024-11-20T19:27:25,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/896d802a660e41d0bd20dd8fe63d07ad to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/896d802a660e41d0bd20dd8fe63d07ad 2024-11-20T19:27:25,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/18c448acec00404b85c5b602ed65a84e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/18c448acec00404b85c5b602ed65a84e 2024-11-20T19:27:25,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fd1c81c87153438e98fd463f484cb856 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/fd1c81c87153438e98fd463f484cb856 2024-11-20T19:27:25,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/ec36feb49ca4493f84d004464448a243 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/ec36feb49ca4493f84d004464448a243 2024-11-20T19:27:25,367 DEBUG 
[RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/recovered.edits/606.seqid, newMaxSeqId=606, maxSeqId=1 2024-11-20T19:27:25,367 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc. 2024-11-20T19:27:25,367 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1635): Region close journal for 794552c8bce342231c204cc0e02fbebc: 2024-11-20T19:27:25,368 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(170): Closed 794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:25,369 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=794552c8bce342231c204cc0e02fbebc, regionState=CLOSED 2024-11-20T19:27:25,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-20T19:27:25,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseRegionProcedure 794552c8bce342231c204cc0e02fbebc, server=db9c3a6c6492,35979,1732130703276 in 2.1660 sec 2024-11-20T19:27:25,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-11-20T19:27:25,371 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=794552c8bce342231c204cc0e02fbebc, UNASSIGN in 2.1690 sec 2024-11-20T19:27:25,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-20T19:27:25,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.1710 sec 2024-11-20T19:27:25,373 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130845373"}]},"ts":"1732130845373"} 2024-11-20T19:27:25,374 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:27:25,416 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:27:25,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.2450 sec 2024-11-20T19:27:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-20T19:27:27,279 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-20T19:27:27,279 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:27:27,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,281 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T19:27:27,281 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=150, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,284 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:27,285 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/recovered.edits] 2024-11-20T19:27:27,287 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/220d2bcc1c50442cac0371758421f783 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/220d2bcc1c50442cac0371758421f783 2024-11-20T19:27:27,288 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ad12ab8f07834972b47930e91d4fdb01 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/ad12ab8f07834972b47930e91d4fdb01 2024-11-20T19:27:27,289 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/dec3fa362135486c8775f62ef14744eb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/dec3fa362135486c8775f62ef14744eb 2024-11-20T19:27:27,289 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/e420459afe4a4938a98756e44a12bee5 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/e420459afe4a4938a98756e44a12bee5 2024-11-20T19:27:27,290 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/f0c81454babb49848132f3b220f3c208 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/A/f0c81454babb49848132f3b220f3c208 2024-11-20T19:27:27,293 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1f140f37320e453baf772b724f844f58 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/1f140f37320e453baf772b724f844f58 2024-11-20T19:27:27,294 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/68e581547ace45ebae09cfaf42f77aff to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/68e581547ace45ebae09cfaf42f77aff 2024-11-20T19:27:27,294 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/739c3e6bdefb4d9daf45b775e9d02271 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/739c3e6bdefb4d9daf45b775e9d02271 2024-11-20T19:27:27,295 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/99cdddd7f6f34c7793236f49559e4a36 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/99cdddd7f6f34c7793236f49559e4a36 2024-11-20T19:27:27,296 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a8e85be6e14e49f7b7ed335baf54a323 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/B/a8e85be6e14e49f7b7ed335baf54a323 2024-11-20T19:27:27,298 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/63943288a77a48598c7bb73f0a9d3ec8 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/63943288a77a48598c7bb73f0a9d3ec8 2024-11-20T19:27:27,299 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/654fc028b34c402db91b546b69ec03ac to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/654fc028b34c402db91b546b69ec03ac 2024-11-20T19:27:27,300 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6cde54406155436d81bd8ecf28a87d80 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/6cde54406155436d81bd8ecf28a87d80 2024-11-20T19:27:27,300 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/cfb7750847ac43fca0e663d2bf07ffe0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/cfb7750847ac43fca0e663d2bf07ffe0 2024-11-20T19:27:27,301 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d29487f945bd433fb811068a18fcf618 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/d29487f945bd433fb811068a18fcf618 2024-11-20T19:27:27,303 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/recovered.edits/606.seqid to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/recovered.edits/606.seqid 2024-11-20T19:27:27,303 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc 2024-11-20T19:27:27,304 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:27:27,306 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=150, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,307 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:27:27,315 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
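The StoreCloser/HFileArchiver entries above move each compacted or deleted store file from the region's data directory to a parallel location under archive/. A minimal sketch of that data-to-archive path mapping, assuming only the directory layout visible in these log lines (rootDir/data/<namespace>/<table>/<region>/<family>/<file> mirrored under rootDir/archive/data/...); the class and method names below are illustrative, not HBase's internal API.

// Illustrative only: mirrors the data -> archive layout seen in the log entries above.
public final class ArchivePathSketch {
  static String toArchivePath(String rootDir, String storeFilePath) {
    String dataPrefix = rootDir + "/data/";
    if (!storeFilePath.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
    }
    // Keep the relative table/region/family/file layout, just re-root it under archive/data/.
    return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d";
    String src = root + "/data/default/TestAcidGuarantees/794552c8bce342231c204cc0e02fbebc/C/c1fc990b346245c191e052e0faa7def5";
    // Prints the same archive path the HFileArchiver logged for this file.
    System.out.println(toArchivePath(root, src));
  }
}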
2024-11-20T19:27:27,316 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=150, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,316 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:27:27,316 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130847316"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:27,317 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:27:27,317 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 794552c8bce342231c204cc0e02fbebc, NAME => 'TestAcidGuarantees,,1732130821317.794552c8bce342231c204cc0e02fbebc.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:27:27,318 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-20T19:27:27,318 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130847318"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:27,321 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:27:27,359 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=150, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 79 msec 2024-11-20T19:27:27,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-20T19:27:27,382 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-20T19:27:27,391 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241 (was 241), OpenFileDescriptor=462 (was 467), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=651 (was 685), ProcessCount=11 (was 11), AvailableMemoryMB=3209 (was 3236) 2024-11-20T19:27:27,401 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=651, ProcessCount=11, AvailableMemoryMB=3209 2024-11-20T19:27:27,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
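The DISABLE (procId 146) and DELETE (procId 150) operations recorded above correspond to a pair of client-side Admin calls. A hedged sketch of issuing the same two requests through the HBase 2.x Admin API, assuming a reachable cluster configuration on the classpath; this is illustrative and not the test's own teardown code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        // DISABLE first (DisableTableProcedure), then DELETE (DeleteTableProcedure),
        // mirroring procIds 146 and 150 in the log above.
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);   // blocks until the table is DISABLED in hbase:meta
        }
        admin.deleteTable(tn);      // archives region dirs and removes the meta rows
      }
    }
  }
}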
2024-11-20T19:27:27,403 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:27:27,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:27,404 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-20T19:27:27,404 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:27,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 151 2024-11-20T19:27:27,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T19:27:27,405 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-20T19:27:27,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742465_1641 (size=963) 2024-11-20T19:27:27,424 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d 2024-11-20T19:27:27,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742466_1642 (size=53) 2024-11-20T19:27:27,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T19:27:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T19:27:27,847 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:27,847 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d8a34887157cb9c8687afaaeab650abb, disabling compactions & flushes 2024-11-20T19:27:27,847 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:27,847 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:27,847 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. after waiting 0 ms 2024-11-20T19:27:27,847 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:27,847 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:27,847 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:27,848 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-20T19:27:27,848 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732130847848"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732130847848"}]},"ts":"1732130847848"} 2024-11-20T19:27:27,849 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
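The CREATE request above carries a descriptor with three column families (A, B, C, one version each) and the table-level attribute hbase.hregion.compacting.memstore.type=ADAPTIVE. A sketch of building an equivalent descriptor with the HBase 2.x builder API, reconstructed from the descriptor printed in the log rather than taken from the test source; the helper name is illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateTableSketch {
  static void createAcidTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder tdb = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata seen in the log: adaptive in-memory compaction.
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)        // VERSIONS => '1' in the printed descriptor
          .build());
    }
    admin.createTable(tdb.build()); // drives the CREATE_TABLE_* states shown above
  }
}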
2024-11-20T19:27:27,849 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-20T19:27:27,849 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130847849"}]},"ts":"1732130847849"} 2024-11-20T19:27:27,850 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-20T19:27:27,900 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, ASSIGN}] 2024-11-20T19:27:27,901 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, ASSIGN 2024-11-20T19:27:27,901 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, ASSIGN; state=OFFLINE, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=false 2024-11-20T19:27:28,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T19:27:28,052 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:28,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; OpenRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:28,204 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:28,206 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
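The ASSIGN/OpenRegionProcedure entries above end with region d8a34887157cb9c8687afaaeab650abb being opened on db9c3a6c6492,35979,1732130703276. A small sketch of how a client could observe the resulting placement through the public RegionLocator API, assuming an already-open Connection; the output format is illustrative.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionPlacementSketch {
  static void printPlacement(Connection conn) throws java.io.IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // e.g. d8a34887157cb9c8687afaaeab650abb -> db9c3a6c6492,35979,1732130703276
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}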
2024-11-20T19:27:28,206 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7285): Opening region: {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:27:28,207 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,207 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:28,207 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7327): checking encryption for d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,207 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7330): checking classloading for d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,207 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,208 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:28,208 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8a34887157cb9c8687afaaeab650abb columnFamilyName A 2024-11-20T19:27:28,209 DEBUG [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:28,209 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(327): Store=d8a34887157cb9c8687afaaeab650abb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:28,209 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,210 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:28,210 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8a34887157cb9c8687afaaeab650abb columnFamilyName B 2024-11-20T19:27:28,210 DEBUG [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:28,210 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(327): Store=d8a34887157cb9c8687afaaeab650abb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:28,210 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,211 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:28,211 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8a34887157cb9c8687afaaeab650abb columnFamilyName C 2024-11-20T19:27:28,211 DEBUG [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:28,211 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(327): Store=d8a34887157cb9c8687afaaeab650abb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:28,212 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:28,212 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,212 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,213 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:27:28,214 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1085): writing seq id for d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,215 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-20T19:27:28,215 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1102): Opened d8a34887157cb9c8687afaaeab650abb; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62261912, jitterRate=-0.07222521305084229}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:27:28,216 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1001): Region open journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:28,216 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., pid=153, masterSystemTime=1732130848204 2024-11-20T19:27:28,217 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:28,217 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
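The StoreOpener lines above show every store of the new region instantiating a CompactingMemStore with compactor=ADAPTIVE and a 2 MB in-memory flush threshold, which follows from the table-level metadata set at creation. For reference, the same policy can also be requested per column family through the public descriptor API; a hedged sketch assuming HBase 2.x, not implying this is how the test configured it.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class InMemoryCompactionSketch {
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        // Per-family counterpart of the table-wide
        // hbase.hregion.compacting.memstore.type=ADAPTIVE metadata used in this test.
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}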
2024-11-20T19:27:28,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=OPEN, openSeqNum=2, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:28,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-20T19:27:28,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; OpenRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 in 165 msec 2024-11-20T19:27:28,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-20T19:27:28,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, ASSIGN in 319 msec 2024-11-20T19:27:28,221 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-20T19:27:28,221 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130848221"}]},"ts":"1732130848221"} 2024-11-20T19:27:28,221 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-20T19:27:28,267 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-20T19:27:28,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 864 msec 2024-11-20T19:27:28,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-20T19:27:28,508 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-11-20T19:27:28,509 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-20T19:27:28,537 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:28,538 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:28,539 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53550, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:28,539 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-20T19:27:28,540 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38604, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-20T19:27:28,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-20T19:27:28,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-20T19:27:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:28,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742467_1643 (size=999) 2024-11-20T19:27:28,552 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-11-20T19:27:28,553 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-11-20T19:27:28,554 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:28,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, REOPEN/MOVE}] 2024-11-20T19:27:28,556 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, REOPEN/MOVE 2024-11-20T19:27:28,556 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:28,557 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:28,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:28,708 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:28,709 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,709 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:28,709 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing d8a34887157cb9c8687afaaeab650abb, disabling compactions & flushes 2024-11-20T19:27:28,709 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:28,709 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:28,709 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. after waiting 0 ms 2024-11-20T19:27:28,709 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:28,712 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-20T19:27:28,712 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:28,712 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:28,712 WARN [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionServer(3786): Not adding moved region record: d8a34887157cb9c8687afaaeab650abb to self. 2024-11-20T19:27:28,713 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:28,713 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=CLOSED 2024-11-20T19:27:28,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-20T19:27:28,715 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, REOPEN/MOVE; state=CLOSED, location=db9c3a6c6492,35979,1732130703276; forceNewPlan=false, retain=true 2024-11-20T19:27:28,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 in 157 msec 2024-11-20T19:27:28,866 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=OPENING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:28,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=156, state=RUNNABLE; OpenRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:29,018 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,020 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:29,020 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7285): Opening region: {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} 2024-11-20T19:27:29,021 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,021 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-20T19:27:29,021 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7327): checking encryption for d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,021 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7330): checking classloading for d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,021 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,022 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:29,022 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8a34887157cb9c8687afaaeab650abb columnFamilyName A 2024-11-20T19:27:29,024 DEBUG [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:29,024 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(327): Store=d8a34887157cb9c8687afaaeab650abb/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:29,024 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,024 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:29,025 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8a34887157cb9c8687afaaeab650abb columnFamilyName B 2024-11-20T19:27:29,025 DEBUG [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:29,025 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(327): Store=d8a34887157cb9c8687afaaeab650abb/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:29,025 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,025 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-11-20T19:27:29,025 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d8a34887157cb9c8687afaaeab650abb columnFamilyName C 2024-11-20T19:27:29,025 DEBUG [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:29,026 INFO [StoreOpener-d8a34887157cb9c8687afaaeab650abb-1 {}] regionserver.HStore(327): Store=d8a34887157cb9c8687afaaeab650abb/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-20T19:27:29,026 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,026 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,027 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,028 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-20T19:27:29,029 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1085): writing seq id for d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,030 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1102): Opened d8a34887157cb9c8687afaaeab650abb; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61242304, jitterRate=-0.0874185562133789}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-20T19:27:29,030 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1001): Region open journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:29,031 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., pid=158, masterSystemTime=1732130849018 2024-11-20T19:27:29,032 DEBUG [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,032 INFO [RS_OPEN_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:29,032 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=OPEN, openSeqNum=5, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-11-20T19:27:29,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; OpenRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 in 166 msec 2024-11-20T19:27:29,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-20T19:27:29,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, REOPEN/MOVE in 479 msec 2024-11-20T19:27:29,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-20T19:27:29,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 481 msec 2024-11-20T19:27:29,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 495 msec 2024-11-20T19:27:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-20T19:27:29,040 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-20T19:27:29,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,085 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-20T19:27:29,092 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,093 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-20T19:27:29,100 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,101 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a 
to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-20T19:27:29,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,109 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-20T19:27:29,125 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,126 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-20T19:27:29,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-20T19:27:29,150 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,151 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-20T19:27:29,167 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,168 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-20T19:27:29,184 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,185 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:50476 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-11-20T19:27:29,192 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-20T19:27:29,203 DEBUG [hconnection-0x5c55f643-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,204 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,215 DEBUG [hconnection-0x4b357a67-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,216 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:29,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:29,227 DEBUG [hconnection-0x418d0c6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,227 DEBUG [hconnection-0x1399f523-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,228 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,229 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,266 DEBUG [hconnection-0x6bd79029-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,269 INFO [RS-EventLoopGroup-3-1 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,271 DEBUG [hconnection-0x649f4022-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,272 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53602, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130909284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130909284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130909285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,290 DEBUG [hconnection-0x7ad63253-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,292 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130909295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,303 DEBUG [hconnection-0x5a9e2fde-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,308 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,311 DEBUG [hconnection-0x33a69696-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,312 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:29,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-20T19:27:29,315 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:29,316 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:29,316 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:29,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:29,327 DEBUG [hconnection-0x637dfc02-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-20T19:27:29,328 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53642, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-20T19:27:29,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130909331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cd01d367b3b147469fd902713575db90_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130849213/Put/seqid=0 2024-11-20T19:27:29,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130909386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130909386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130909389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130909397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742468_1644 (size=12154) 2024-11-20T19:27:29,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:29,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130909434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,467 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130909589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130909590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130909593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130909600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:29,622 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:29,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:29,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130909638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,775 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:29,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
as already flushing 2024-11-20T19:27:29,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,806 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:29,810 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cd01d367b3b147469fd902713575db90_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd01d367b3b147469fd902713575db90_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:29,811 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/b3efa72140de4798b7102997f9df3048, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:29,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/b3efa72140de4798b7102997f9df3048 is 175, key is test_row_0/A:col10/1732130849213/Put/seqid=0 2024-11-20T19:27:29,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742469_1645 (size=30955) 2024-11-20T19:27:29,833 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, 
memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/b3efa72140de4798b7102997f9df3048 2024-11-20T19:27:29,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/346285c5e5cd4c9ebe9e4493de5f19d9 is 50, key is test_row_0/B:col10/1732130849213/Put/seqid=0 2024-11-20T19:27:29,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130909892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742470_1646 (size=12001) 2024-11-20T19:27:29,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130909896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130909904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130909904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/346285c5e5cd4c9ebe9e4493de5f19d9 2024-11-20T19:27:29,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:29,927 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:29,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:29,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:29,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130909942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:29,955 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/87932f2594544b0192a616170e8717ff is 50, key is test_row_0/C:col10/1732130849213/Put/seqid=0 2024-11-20T19:27:29,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742471_1647 (size=12001) 2024-11-20T19:27:29,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/87932f2594544b0192a616170e8717ff 2024-11-20T19:27:29,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/b3efa72140de4798b7102997f9df3048 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048 2024-11-20T19:27:29,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048, entries=150, sequenceid=15, filesize=30.2 K 2024-11-20T19:27:30,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/346285c5e5cd4c9ebe9e4493de5f19d9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9 2024-11-20T19:27:30,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:27:30,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/87932f2594544b0192a616170e8717ff as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/87932f2594544b0192a616170e8717ff 2024-11-20T19:27:30,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/87932f2594544b0192a616170e8717ff, entries=150, sequenceid=15, filesize=11.7 K 2024-11-20T19:27:30,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for d8a34887157cb9c8687afaaeab650abb in 794ms, sequenceid=15, compaction requested=false 2024-11-20T19:27:30,012 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:30,080 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:30,081 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:30,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112000e09ec36e3f48689c46d9754f195d41_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130849282/Put/seqid=0 2024-11-20T19:27:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742472_1648 (size=12154) 2024-11-20T19:27:30,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,115 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112000e09ec36e3f48689c46d9754f195d41_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112000e09ec36e3f48689c46d9754f195d41_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:30,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d1271008cd04458fbe97dc7b27eca200, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:30,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d1271008cd04458fbe97dc7b27eca200 is 175, key is test_row_0/A:col10/1732130849282/Put/seqid=0 2024-11-20T19:27:30,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742473_1649 (size=30955) 2024-11-20T19:27:30,124 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d1271008cd04458fbe97dc7b27eca200 2024-11-20T19:27:30,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/16da57388c8743beac29b9f718fe7c15 is 50, key is test_row_0/B:col10/1732130849282/Put/seqid=0 2024-11-20T19:27:30,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742474_1650 (size=12001) 2024-11-20T19:27:30,170 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/16da57388c8743beac29b9f718fe7c15 2024-11-20T19:27:30,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/54856a08a8ba40edaf837c47263392f7 is 50, key is test_row_0/C:col10/1732130849282/Put/seqid=0 2024-11-20T19:27:30,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742475_1651 (size=12001) 2024-11-20T19:27:30,251 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/54856a08a8ba40edaf837c47263392f7 2024-11-20T19:27:30,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d1271008cd04458fbe97dc7b27eca200 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200 2024-11-20T19:27:30,262 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200, entries=150, sequenceid=40, filesize=30.2 K 2024-11-20T19:27:30,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/16da57388c8743beac29b9f718fe7c15 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/16da57388c8743beac29b9f718fe7c15 2024-11-20T19:27:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,268 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/16da57388c8743beac29b9f718fe7c15, entries=150, sequenceid=40, filesize=11.7 K 2024-11-20T19:27:30,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/54856a08a8ba40edaf837c47263392f7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/54856a08a8ba40edaf837c47263392f7 2024-11-20T19:27:30,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-20T19:27:30,295 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/54856a08a8ba40edaf837c47263392f7, entries=150, sequenceid=40, filesize=11.7 K
2024-11-20T19:27:30,296 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for d8a34887157cb9c8687afaaeab650abb in 215ms, sequenceid=40, compaction requested=false
2024-11-20T19:27:30,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb:
2024-11-20T19:27:30,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.
2024-11-20T19:27:30,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160
2024-11-20T19:27:30,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=160
2024-11-20T19:27:30,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159
2024-11-20T19:27:30,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 981 msec
2024-11-20T19:27:30,299 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 984 msec
2024-11-20T19:27:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-20T19:27:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,423 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-20T19:27:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,424 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-20T19:27:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,426 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,427 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:30,427 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,440 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:30,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:30,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:30,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:30,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:30,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:30,441 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-20T19:27:30,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207d583607d99143b2a69117d81e5b61e5_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:30,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130910467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130910469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130910471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130910471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130910473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742476_1652 (size=12154) 2024-11-20T19:27:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:30,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130910574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130910575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130910575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130910575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,579 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:30,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:30,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:30,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:30,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:30,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130910579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:30,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:30,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:30,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:30,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:30,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:30,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130910779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130910779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130910781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130910781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:30,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130910783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,884 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:30,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:30,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:30,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:30,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:30,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:30,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:30,894 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-20T19:27:30,896 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:30,899 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411207d583607d99143b2a69117d81e5b61e5_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207d583607d99143b2a69117d81e5b61e5_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:30,900 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/c5e6d505add84abfbb87f6eca6c5ad5e, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:30,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/c5e6d505add84abfbb87f6eca6c5ad5e is 175, key is test_row_0/A:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:30,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742477_1653 (size=30955) 2024-11-20T19:27:30,954 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/c5e6d505add84abfbb87f6eca6c5ad5e 2024-11-20T19:27:30,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/de77d8b0ae244c8b9828c537b659c0e4 is 50, key is test_row_0/B:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:31,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742478_1654 (size=12001) 2024-11-20T19:27:31,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/de77d8b0ae244c8b9828c537b659c0e4 2024-11-20T19:27:31,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:31,035 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/2ec63a4a80754ee2af9374dd5200fa7a is 50, key is test_row_0/C:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:31,036 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:31,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:31,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:31,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:31,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:31,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:31,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-20T19:27:31,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742479_1655 (size=12001) 2024-11-20T19:27:31,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/2ec63a4a80754ee2af9374dd5200fa7a 2024-11-20T19:27:31,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/c5e6d505add84abfbb87f6eca6c5ad5e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e 2024-11-20T19:27:31,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e, entries=150, sequenceid=52, filesize=30.2 K 2024-11-20T19:27:31,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/de77d8b0ae244c8b9828c537b659c0e4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/de77d8b0ae244c8b9828c537b659c0e4 2024-11-20T19:27:31,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130911083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130911084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/de77d8b0ae244c8b9828c537b659c0e4, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T19:27:31,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/2ec63a4a80754ee2af9374dd5200fa7a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/2ec63a4a80754ee2af9374dd5200fa7a 2024-11-20T19:27:31,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130911085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130911085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130911088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/2ec63a4a80754ee2af9374dd5200fa7a, entries=150, sequenceid=52, filesize=11.7 K 2024-11-20T19:27:31,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d8a34887157cb9c8687afaaeab650abb in 654ms, sequenceid=52, compaction requested=true 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:31,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:31,094 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store 
size is 3 2024-11-20T19:27:31,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:31,095 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:31,095 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:31,095 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:31,095 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/16da57388c8743beac29b9f718fe7c15, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/de77d8b0ae244c8b9828c537b659c0e4] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.2 K 2024-11-20T19:27:31,095 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:31,095 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:31,095 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:31,095 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=90.7 K 2024-11-20T19:27:31,095 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:31,095 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e] 2024-11-20T19:27:31,096 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 346285c5e5cd4c9ebe9e4493de5f19d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130849213 2024-11-20T19:27:31,096 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3efa72140de4798b7102997f9df3048, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130849213 2024-11-20T19:27:31,097 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 16da57388c8743beac29b9f718fe7c15, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130849279 2024-11-20T19:27:31,097 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1271008cd04458fbe97dc7b27eca200, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130849279 2024-11-20T19:27:31,097 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting de77d8b0ae244c8b9828c537b659c0e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130850407 2024-11-20T19:27:31,098 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5e6d505add84abfbb87f6eca6c5ad5e, keycount=150, bloomtype=ROW, size=30.2 K, 
encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130850407 2024-11-20T19:27:31,115 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:31,133 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#562 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:31,133 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/adc5c829bb5f425caf1a18ffe5a3198f is 50, key is test_row_0/B:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:31,139 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120445e4f2cba064b0caec865a374688586_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:31,141 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120445e4f2cba064b0caec865a374688586_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:31,141 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120445e4f2cba064b0caec865a374688586_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:31,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742480_1656 (size=12104) 2024-11-20T19:27:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742481_1657 (size=4469) 2024-11-20T19:27:31,184 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#561 average throughput is 0.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:31,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9b89cfcca911448c803bc8cbda4d81fe is 175, key is test_row_0/A:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:31,189 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-20T19:27:31,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:31,189 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:27:31,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:31,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:31,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742482_1658 (size=31058) 2024-11-20T19:27:31,215 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9b89cfcca911448c803bc8cbda4d81fe as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9b89cfcca911448c803bc8cbda4d81fe 2024-11-20T19:27:31,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dac5643565a4433a90b8426f591f3ad4_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130850469/Put/seqid=0 2024-11-20T19:27:31,221 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into 9b89cfcca911448c803bc8cbda4d81fe(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:31,221 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:31,221 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130851094; duration=0sec 2024-11-20T19:27:31,221 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:31,221 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:31,221 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:31,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:31,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:31,223 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:31,223 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/87932f2594544b0192a616170e8717ff, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/54856a08a8ba40edaf837c47263392f7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/2ec63a4a80754ee2af9374dd5200fa7a] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.2 K 2024-11-20T19:27:31,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87932f2594544b0192a616170e8717ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732130849213 2024-11-20T19:27:31,223 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54856a08a8ba40edaf837c47263392f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732130849279 2024-11-20T19:27:31,224 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ec63a4a80754ee2af9374dd5200fa7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130850407 2024-11-20T19:27:31,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742483_1659 (size=12154) 2024-11-20T19:27:31,247 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#564 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:31,247 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/a0af1d1c2ae342f0a77f3c186598813b is 50, key is test_row_0/C:col10/1732130850407/Put/seqid=0 2024-11-20T19:27:31,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:31,253 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dac5643565a4433a90b8426f591f3ad4_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dac5643565a4433a90b8426f591f3ad4_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:31,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/123f71e217594439a164b80553efc488, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:31,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/123f71e217594439a164b80553efc488 is 175, key is test_row_0/A:col10/1732130850469/Put/seqid=0 2024-11-20T19:27:31,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742484_1660 (size=12104) 2024-11-20T19:27:31,319 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/a0af1d1c2ae342f0a77f3c186598813b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/a0af1d1c2ae342f0a77f3c186598813b 2024-11-20T19:27:31,325 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into a0af1d1c2ae342f0a77f3c186598813b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:31,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:31,325 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130851094; duration=0sec 2024-11-20T19:27:31,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:31,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:31,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742485_1661 (size=30955) 2024-11-20T19:27:31,342 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/123f71e217594439a164b80553efc488 2024-11-20T19:27:31,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/001cf4d2dbd844f1beec6c7f85e88ccb is 50, key is test_row_0/B:col10/1732130850469/Put/seqid=0 2024-11-20T19:27:31,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742486_1662 (size=12001) 2024-11-20T19:27:31,386 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/001cf4d2dbd844f1beec6c7f85e88ccb 2024-11-20T19:27:31,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9154e73852bd40e6a3208a0de8f79949 is 50, key is test_row_0/C:col10/1732130850469/Put/seqid=0 2024-11-20T19:27:31,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742487_1663 (size=12001) 2024-11-20T19:27:31,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:31,573 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/adc5c829bb5f425caf1a18ffe5a3198f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/adc5c829bb5f425caf1a18ffe5a3198f 2024-11-20T19:27:31,574 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-20T19:27:31,577 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into adc5c829bb5f425caf1a18ffe5a3198f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:31,577 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:31,577 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130851094; duration=0sec 2024-11-20T19:27:31,577 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:31,577 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:31,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:31,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130911594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130911594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130911596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130911596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130911597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130911698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130911698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130911699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130911700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130911700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,836 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9154e73852bd40e6a3208a0de8f79949 2024-11-20T19:27:31,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/123f71e217594439a164b80553efc488 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488 2024-11-20T19:27:31,841 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488, entries=150, sequenceid=76, filesize=30.2 K 2024-11-20T19:27:31,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/001cf4d2dbd844f1beec6c7f85e88ccb as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/001cf4d2dbd844f1beec6c7f85e88ccb 2024-11-20T19:27:31,844 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/001cf4d2dbd844f1beec6c7f85e88ccb, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T19:27:31,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9154e73852bd40e6a3208a0de8f79949 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9154e73852bd40e6a3208a0de8f79949 2024-11-20T19:27:31,847 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9154e73852bd40e6a3208a0de8f79949, entries=150, sequenceid=76, filesize=11.7 K 2024-11-20T19:27:31,847 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d8a34887157cb9c8687afaaeab650abb in 658ms, sequenceid=76, compaction requested=false 2024-11-20T19:27:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
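The repeated RegionTooBusyException entries in this section show writes being rejected while the region is over its 512.0 K memstore limit and a flush is still in progress. As a purely illustrative sketch, not part of this log and not the test's own code, a client issuing the same kind of Put might wrap the call in a backoff-and-retry loop; table, row, and column names below mirror the log but are otherwise arbitrary.

// Sketch only: retry a Put with exponential backoff while the region is too busy.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put); // may fail while the memstore is over its limit
          break;          // success
        } catch (IOException ioe) {
          // Once the client's own retries are exhausted, an "Over memstore limit"
          // rejection (RegionTooBusyException in the cause chain) can surface here;
          // back off so flushes and compactions can catch up, then try again.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}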
2024-11-20T19:27:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-20T19:27:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-20T19:27:31,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-20T19:27:31,849 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4210 sec 2024-11-20T19:27:31,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.4260 sec 2024-11-20T19:27:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:31,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:31,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:31,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:31,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:31,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:31,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200be508f4cffd40cdb88e6dce24b10ec2_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:31,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742488_1664 (size=12154) 2024-11-20T19:27:31,914 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:31,917 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411200be508f4cffd40cdb88e6dce24b10ec2_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200be508f4cffd40cdb88e6dce24b10ec2_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:31,918 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9006ba5ed2db42938f6f11a0cda7fd86, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:31,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9006ba5ed2db42938f6f11a0cda7fd86 is 175, key is test_row_0/A:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:31,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130911918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130911918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130911919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130911919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130911920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:31,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742489_1665 (size=30955) 2024-11-20T19:27:31,930 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9006ba5ed2db42938f6f11a0cda7fd86 2024-11-20T19:27:31,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/93951d762abe4243a2c68484d41ca689 is 50, key is test_row_0/B:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:31,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742490_1666 (size=12001) 2024-11-20T19:27:32,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130912023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130912023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130912023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130912023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130912023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130912225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130912225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130912226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130912227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130912227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/93951d762abe4243a2c68484d41ca689 2024-11-20T19:27:32,346 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/abc40d64db1e41f79c302c0182932b94 is 50, key is test_row_0/C:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:32,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742491_1667 (size=12001) 2024-11-20T19:27:32,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-20T19:27:32,529 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-20T19:27:32,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130912528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:32,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-20T19:27:32,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:32,531 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:32,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130912529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,531 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:32,531 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:32,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130912530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130912531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:32,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130912531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:32,682 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T19:27:32,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:32,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:32,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:32,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
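The flush procedures appearing in these entries (procId 161 and 163/164) are driven from the test client through the HBase Admin API; below is a minimal sketch of issuing such a flush, assuming only the table name taken from the log (cluster connection details and error handling are omitted, and the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush of every region of the table; on the master this is
          // executed as a FlushTableProcedure with one FlushRegionProcedure per
          // region, matching the pid=163 / pid=164 entries in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

In the entries above the region is already flushing when the callable runs, so the first dispatch of pid=164 fails with "Unable to complete flush" and the master re-dispatches it shortly afterwards.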
2024-11-20T19:27:32,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:32,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:32,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/abc40d64db1e41f79c302c0182932b94 2024-11-20T19:27:32,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9006ba5ed2db42938f6f11a0cda7fd86 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86 2024-11-20T19:27:32,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86, entries=150, sequenceid=92, filesize=30.2 K 2024-11-20T19:27:32,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/93951d762abe4243a2c68484d41ca689 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/93951d762abe4243a2c68484d41ca689 2024-11-20T19:27:32,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/93951d762abe4243a2c68484d41ca689, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:27:32,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/abc40d64db1e41f79c302c0182932b94 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/abc40d64db1e41f79c302c0182932b94 2024-11-20T19:27:32,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/abc40d64db1e41f79c302c0182932b94, entries=150, sequenceid=92, filesize=11.7 K 2024-11-20T19:27:32,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d8a34887157cb9c8687afaaeab650abb in 858ms, sequenceid=92, compaction requested=true 2024-11-20T19:27:32,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, 
priority=-2147483648, current under compaction store size is 1
2024-11-20T19:27:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T19:27:32,761 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T19:27:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2
2024-11-20T19:27:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-20T19:27:32,761 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-20T19:27:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3
2024-11-20T19:27:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-20T19:27:32,761 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T19:27:32,761 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-20T19:27:32,761 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files)
2024-11-20T19:27:32,761 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files)
2024-11-20T19:27:32,761 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:32,761 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.
2024-11-20T19:27:32,761 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9b89cfcca911448c803bc8cbda4d81fe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=90.8 K 2024-11-20T19:27:32,761 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/adc5c829bb5f425caf1a18ffe5a3198f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/001cf4d2dbd844f1beec6c7f85e88ccb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/93951d762abe4243a2c68484d41ca689] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.3 K 2024-11-20T19:27:32,761 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9b89cfcca911448c803bc8cbda4d81fe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86] 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting adc5c829bb5f425caf1a18ffe5a3198f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130850407 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b89cfcca911448c803bc8cbda4d81fe, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130850407 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 123f71e217594439a164b80553efc488, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732130850469 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 001cf4d2dbd844f1beec6c7f85e88ccb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732130850469 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9006ba5ed2db42938f6f11a0cda7fd86, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130851588 2024-11-20T19:27:32,762 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 93951d762abe4243a2c68484d41ca689, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130851588 2024-11-20T19:27:32,766 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:32,767 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411209043a4fef3da4a7da317543222607062_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:32,768 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#570 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:32,768 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/41d5ae68574f42eaa3c3deb61ded489d is 50, key is test_row_0/B:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:32,769 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411209043a4fef3da4a7da317543222607062_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:32,769 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411209043a4fef3da4a7da317543222607062_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:32,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742492_1668 (size=12207) 2024-11-20T19:27:32,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742493_1669 (size=4469) 2024-11-20T19:27:32,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:32,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:32,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
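Every RegionTooBusyException entry above reports the same blocking threshold, "Over memstore limit=512.0 K". In HRegion.checkResources that threshold is the per-region memstore flush size multiplied by the block multiplier; below is a minimal sketch of the two settings involved. The values are assumptions chosen only to reproduce the 512 K figure (the test's actual configuration is not shown in this log), and the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical values: a 128 KB per-region flush size with the default
        // multiplier of 4 gives the 512 K blocking limit reported by
        // checkResources() in the traces above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // While a region's memstore exceeds flush.size * block.multiplier,
        // incoming mutations are rejected with RegionTooBusyException until a
        // flush brings the memstore back under the limit.
      }
    }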
2024-11-20T19:27:32,835 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:32,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:32,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dd7bf7d9e33f4a01a8cdcff07b2dc927_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130851906/Put/seqid=0 2024-11-20T19:27:32,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742494_1670 (size=12154) 2024-11-20T19:27:33,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:33,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:33,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130913039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130913040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130913040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130913041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130913043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:33,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130913144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130913144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130913144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130913144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130913146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,174 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/41d5ae68574f42eaa3c3deb61ded489d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/41d5ae68574f42eaa3c3deb61ded489d 2024-11-20T19:27:33,176 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#571 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:33,177 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/2057403d29724c7d9bf58ba8ee82f815 is 175, key is test_row_0/A:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:33,177 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into 41d5ae68574f42eaa3c3deb61ded489d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
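The rejected Mutate calls logged above are returned to the writing client as remote RegionTooBusyException instances; the HBase client retries them internally and only surfaces a failure once its retry budget is spent. Below is a minimal client-side sketch, assuming a plain Table-based writer against the row, family, and qualifier that appear in the log (the value, connection details, and class name are placeholders):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // The client retries server-side RegionTooBusyException rejections
            // internally; each rejection shows up as an entry like those above.
            table.put(put);
          } catch (IOException e) {
            // Retry budget exhausted while the region stayed over its memstore
            // limit; the remote RegionTooBusyException is typically wrapped in
            // the exception caught here. Back off and try again later.
          }
        }
      }
    }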
2024-11-20T19:27:33,178 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:33,178 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130852761; duration=0sec 2024-11-20T19:27:33,178 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:33,178 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:33,178 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:33,178 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:33,179 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:33,179 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:33,179 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/a0af1d1c2ae342f0a77f3c186598813b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9154e73852bd40e6a3208a0de8f79949, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/abc40d64db1e41f79c302c0182932b94] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.3 K 2024-11-20T19:27:33,179 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting a0af1d1c2ae342f0a77f3c186598813b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732130850407 2024-11-20T19:27:33,179 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9154e73852bd40e6a3208a0de8f79949, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732130850469 2024-11-20T19:27:33,180 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting abc40d64db1e41f79c302c0182932b94, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130851588 2024-11-20T19:27:33,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is 
added to blk_1073742495_1671 (size=31161) 2024-11-20T19:27:33,183 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/2057403d29724c7d9bf58ba8ee82f815 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/2057403d29724c7d9bf58ba8ee82f815 2024-11-20T19:27:33,185 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#573 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:33,185 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into 2057403d29724c7d9bf58ba8ee82f815(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:33,185 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:33,185 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/6e0a79a19ce84f63aa535b2ecbf101d5 is 50, key is test_row_0/C:col10/1732130851901/Put/seqid=0 2024-11-20T19:27:33,185 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130852761; duration=0sec 2024-11-20T19:27:33,186 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:33,186 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742496_1672 (size=12207) 2024-11-20T19:27:33,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:33,248 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120dd7bf7d9e33f4a01a8cdcff07b2dc927_d8a34887157cb9c8687afaaeab650abb to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dd7bf7d9e33f4a01a8cdcff07b2dc927_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:33,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f240454ce4624855a0df47e41cfc5c1f, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:33,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f240454ce4624855a0df47e41cfc5c1f is 175, key is test_row_0/A:col10/1732130851906/Put/seqid=0 2024-11-20T19:27:33,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742497_1673 (size=30955) 2024-11-20T19:27:33,252 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=115, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f240454ce4624855a0df47e41cfc5c1f 2024-11-20T19:27:33,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/417f6d29c8ef4b49a74860b9c3407fd1 is 50, key is test_row_0/B:col10/1732130851906/Put/seqid=0 2024-11-20T19:27:33,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742498_1674 (size=12001) 2024-11-20T19:27:33,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130913346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130913346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130913347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130913347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130913348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,609 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/6e0a79a19ce84f63aa535b2ecbf101d5 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/6e0a79a19ce84f63aa535b2ecbf101d5 2024-11-20T19:27:33,612 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into 6e0a79a19ce84f63aa535b2ecbf101d5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:33,612 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:33,612 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130852761; duration=0sec 2024-11-20T19:27:33,612 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:33,612 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:33,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-20T19:27:33,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130913648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130913650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130913650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130913650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:33,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130913652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:33,659 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/417f6d29c8ef4b49a74860b9c3407fd1 2024-11-20T19:27:33,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/4840e2b5321646a3aa957cb0d2c7a6b0 is 50, key is test_row_0/C:col10/1732130851906/Put/seqid=0 2024-11-20T19:27:33,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742499_1675 (size=12001) 2024-11-20T19:27:34,067 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/4840e2b5321646a3aa957cb0d2c7a6b0 2024-11-20T19:27:34,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f240454ce4624855a0df47e41cfc5c1f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f 2024-11-20T19:27:34,073 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f, entries=150, sequenceid=115, filesize=30.2 K 2024-11-20T19:27:34,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/417f6d29c8ef4b49a74860b9c3407fd1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/417f6d29c8ef4b49a74860b9c3407fd1 2024-11-20T19:27:34,075 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/417f6d29c8ef4b49a74860b9c3407fd1, entries=150, sequenceid=115, filesize=11.7 K 2024-11-20T19:27:34,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/4840e2b5321646a3aa957cb0d2c7a6b0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/4840e2b5321646a3aa957cb0d2c7a6b0 2024-11-20T19:27:34,078 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/4840e2b5321646a3aa957cb0d2c7a6b0, entries=150, sequenceid=115, filesize=11.7 K 2024-11-20T19:27:34,079 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d8a34887157cb9c8687afaaeab650abb in 1244ms, sequenceid=115, compaction requested=false 2024-11-20T19:27:34,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:34,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:34,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-20T19:27:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-20T19:27:34,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-20T19:27:34,080 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5490 sec 2024-11-20T19:27:34,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.5510 sec 2024-11-20T19:27:34,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:34,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-20T19:27:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:34,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:34,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c024178969064940a8751c6d4c14b935_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:34,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742500_1676 (size=12204) 2024-11-20T19:27:34,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130914199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130914200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130914202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130914203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130914203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130914304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130914304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130914305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,308 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130914306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130914307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130914506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130914507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130914508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130914509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130914511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,576 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:34,579 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c024178969064940a8751c6d4c14b935_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c024178969064940a8751c6d4c14b935_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:34,579 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9c6eaac2e9264daf8071c80c8d379ce1, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:34,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9c6eaac2e9264daf8071c80c8d379ce1 is 175, key is test_row_0/A:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:34,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742501_1677 (size=31005) 2024-11-20T19:27:34,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=163 2024-11-20T19:27:34,634 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-20T19:27:34,635 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-20T19:27:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:27:34,636 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:34,636 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:34,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:34,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:27:34,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:27:34,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:34,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:34,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:34,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:34,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:34,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:34,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130914810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130914811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130914812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130914812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:34,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130914813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:27:34,940 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:34,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:27:34,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:34,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:34,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:34,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:34,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:34,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:34,983 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9c6eaac2e9264daf8071c80c8d379ce1 2024-11-20T19:27:34,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/d01ab141f1764befaa0ce2320d523e7f is 50, key is test_row_0/B:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:34,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742502_1678 (size=12051) 2024-11-20T19:27:35,092 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:27:35,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
as already flushing 2024-11-20T19:27:35,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:27:35,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:27:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:35,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130915313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:35,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130915314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:35,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130915315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:35,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130915317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:35,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130915319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/d01ab141f1764befaa0ce2320d523e7f 2024-11-20T19:27:35,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9840103e218349f48327b7232e0014ba is 50, key is test_row_0/C:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:35,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:27:35,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
as already flushing 2024-11-20T19:27:35,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:35,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742503_1679 (size=12051) 2024-11-20T19:27:35,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9840103e218349f48327b7232e0014ba 2024-11-20T19:27:35,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9c6eaac2e9264daf8071c80c8d379ce1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1 2024-11-20T19:27:35,404 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1, entries=150, sequenceid=132, filesize=30.3 K 2024-11-20T19:27:35,405 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/d01ab141f1764befaa0ce2320d523e7f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d01ab141f1764befaa0ce2320d523e7f 
2024-11-20T19:27:35,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d01ab141f1764befaa0ce2320d523e7f, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T19:27:35,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9840103e218349f48327b7232e0014ba as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9840103e218349f48327b7232e0014ba 2024-11-20T19:27:35,410 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9840103e218349f48327b7232e0014ba, entries=150, sequenceid=132, filesize=11.8 K 2024-11-20T19:27:35,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d8a34887157cb9c8687afaaeab650abb in 1257ms, sequenceid=132, compaction requested=true 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:35,411 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:35,411 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:35,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:35,412 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93121 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:35,412 DEBUG 
[RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:35,412 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:35,412 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:35,413 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,413 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,413 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/41d5ae68574f42eaa3c3deb61ded489d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/417f6d29c8ef4b49a74860b9c3407fd1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d01ab141f1764befaa0ce2320d523e7f] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.4 K 2024-11-20T19:27:35,413 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/2057403d29724c7d9bf58ba8ee82f815, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=90.9 K 2024-11-20T19:27:35,413 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/2057403d29724c7d9bf58ba8ee82f815, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1] 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 41d5ae68574f42eaa3c3deb61ded489d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130851588 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2057403d29724c7d9bf58ba8ee82f815, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130851588 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 417f6d29c8ef4b49a74860b9c3407fd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732130851906 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f240454ce4624855a0df47e41cfc5c1f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732130851906 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d01ab141f1764befaa0ce2320d523e7f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130853040 2024-11-20T19:27:35,413 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c6eaac2e9264daf8071c80c8d379ce1, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130853040 2024-11-20T19:27:35,425 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:35,425 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#579 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:35,426 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/7c779ff7f0974a18b630560723794d13 is 50, key is test_row_0/B:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:35,427 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112077184e8ec66f4eb6911f96b8b7b25067_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:35,428 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112077184e8ec66f4eb6911f96b8b7b25067_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:35,429 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112077184e8ec66f4eb6911f96b8b7b25067_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:35,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742505_1681 (size=4469) 2024-11-20T19:27:35,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742504_1680 (size=12359) 2024-11-20T19:27:35,434 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/7c779ff7f0974a18b630560723794d13 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7c779ff7f0974a18b630560723794d13 2024-11-20T19:27:35,437 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into 7c779ff7f0974a18b630560723794d13(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:35,437 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:35,438 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130855411; duration=0sec 2024-11-20T19:27:35,438 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:35,438 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:35,438 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:35,438 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:35,438 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:35,439 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,439 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/6e0a79a19ce84f63aa535b2ecbf101d5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/4840e2b5321646a3aa957cb0d2c7a6b0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9840103e218349f48327b7232e0014ba] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.4 K 2024-11-20T19:27:35,439 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e0a79a19ce84f63aa535b2ecbf101d5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732130851588 2024-11-20T19:27:35,439 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4840e2b5321646a3aa957cb0d2c7a6b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732130851906 2024-11-20T19:27:35,439 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9840103e218349f48327b7232e0014ba, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130853040 2024-11-20T19:27:35,444 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d8a34887157cb9c8687afaaeab650abb#C#compaction#581 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:35,444 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/5259f8ff136542b693fd2b3a2a7e001f is 50, key is test_row_0/C:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:35,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742506_1682 (size=12359) 2024-11-20T19:27:35,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:35,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-20T19:27:35,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:35,550 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:27:35,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:35,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:35,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:35,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:35,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:35,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:35,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202461fce0f1e045719c7bd8719dca335e_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130854202/Put/seqid=0 2024-11-20T19:27:35,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742507_1683 (size=12304) 
2024-11-20T19:27:35,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:27:35,832 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#580 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:35,833 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/b394a62bfc3c4cbfb3389f6405706d4f is 175, key is test_row_0/A:col10/1732130854153/Put/seqid=0 2024-11-20T19:27:35,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742508_1684 (size=31313) 2024-11-20T19:27:35,856 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/5259f8ff136542b693fd2b3a2a7e001f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/5259f8ff136542b693fd2b3a2a7e001f 2024-11-20T19:27:35,859 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into 5259f8ff136542b693fd2b3a2a7e001f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:35,859 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:35,859 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130855411; duration=0sec 2024-11-20T19:27:35,859 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:35,859 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:35,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:35,961 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411202461fce0f1e045719c7bd8719dca335e_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202461fce0f1e045719c7bd8719dca335e_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/ca411eefc1154d348575047304d35eec, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:35,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/ca411eefc1154d348575047304d35eec is 175, key is test_row_0/A:col10/1732130854202/Put/seqid=0 2024-11-20T19:27:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742509_1685 (size=31105) 2024-11-20T19:27:36,239 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/b394a62bfc3c4cbfb3389f6405706d4f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b394a62bfc3c4cbfb3389f6405706d4f 2024-11-20T19:27:36,242 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of 
d8a34887157cb9c8687afaaeab650abb into b394a62bfc3c4cbfb3389f6405706d4f(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:36,242 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:36,242 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130855411; duration=0sec 2024-11-20T19:27:36,242 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:36,242 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:36,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:36,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:36,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130916327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130916327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130916328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130916329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130916330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,365 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/ca411eefc1154d348575047304d35eec 2024-11-20T19:27:36,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/af7d6eaa9d184d2f8f79e2756dd52230 is 50, key is test_row_0/B:col10/1732130854202/Put/seqid=0 2024-11-20T19:27:36,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742510_1686 (size=12151) 2024-11-20T19:27:36,380 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/af7d6eaa9d184d2f8f79e2756dd52230 2024-11-20T19:27:36,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/e7deae69d57940d5b5fa51243a4a50d7 is 50, key is test_row_0/C:col10/1732130854202/Put/seqid=0 2024-11-20T19:27:36,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742511_1687 (size=12151) 2024-11-20T19:27:36,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130916431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130916431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130916431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130916431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130916432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130916633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130916634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130916634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130916634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-20T19:27:36,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130916635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276
2024-11-20T19:27:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165
2024-11-20T19:27:36,788 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/e7deae69d57940d5b5fa51243a4a50d7
2024-11-20T19:27:36,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/ca411eefc1154d348575047304d35eec as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec
2024-11-20T19:27:36,793 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec, entries=150, sequenceid=155, filesize=30.4 K
2024-11-20T19:27:36,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/af7d6eaa9d184d2f8f79e2756dd52230 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/af7d6eaa9d184d2f8f79e2756dd52230
2024-11-20T19:27:36,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-20T19:27:36,796 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/af7d6eaa9d184d2f8f79e2756dd52230, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T19:27:36,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/e7deae69d57940d5b5fa51243a4a50d7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/e7deae69d57940d5b5fa51243a4a50d7 2024-11-20T19:27:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,798 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/e7deae69d57940d5b5fa51243a4a50d7, entries=150, sequenceid=155, filesize=11.9 K 2024-11-20T19:27:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,799 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d8a34887157cb9c8687afaaeab650abb in 1250ms, sequenceid=155, compaction requested=false 
2024-11-20T19:27:36,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:36,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:36,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-20T19:27:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-20T19:27:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-20T19:27:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1640 sec 2024-11-20T19:27:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.1660 sec 2024-11-20T19:27:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:36,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:27:36,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,937 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:36,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:36,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:36,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:36,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:36,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:36,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f561771c8b0643288e7ede392e91dd2b_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:36,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,946 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742512_1688 (size=12304) 2024-11-20T19:27:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:36,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130916951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
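The "Over memstore limit=512.0 K" figure in these RegionTooBusyException entries is the per-region blocking limit: writes are rejected once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of a configuration that would yield a 512 K limit follows; the concrete values (128 KB flush size, multiplier 4) are assumptions for illustration, since the test's actual settings are not part of this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConf {
      // Assumed values: 128 KB flush size x multiplier 4 = 512 KB blocking limit,
      // matching the "Over memstore limit=512.0 K" messages in this log.
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }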
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130916955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130916955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130916956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:36,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130916956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130917057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
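The same handful of client connections (172.17.0.2:53552, 53576, 53588, 53614, 53642) keep resubmitting their mutations with new callIds and later deadlines every 100-200 ms, consistent with the client retrying with backoff until the flush frees memstore space. A rough hand-rolled equivalent of that behaviour is sketched below, assuming the RegionTooBusyException surfaces to the caller rather than being absorbed by the client's built-in retry logic; the retry count, backoff values, and use of the TestAcidGuarantees table name are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionWriter {
      // Retry a put with exponential backoff while the region's memstore is over
      // its blocking limit, mirroring the repeated callIds seen above.
      static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);                 // fails while the memstore is over the limit
              return;
            } catch (RegionTooBusyException busy) {
              Thread.sleep(backoffMs);        // give MemStoreFlusher.0 time to finish
              backoffMs = Math.min(backoffMs * 2, 2000);
            }
          }
          throw new IOException("region still too busy after 10 attempts");
        }
      }
    }

In practice the stock HBase client appears to do this internally, which is why the log shows the same connections reappearing with new deadlines rather than failing outright.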
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130917059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130917059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130917060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130917060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130917260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
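The files under mobdir/.tmp with the d41d8cd98f00b204e9800998ecf8427e... prefix are medium-object (MOB) files: family A appears to be MOB-enabled, since its flush goes through DefaultMobStoreFlusher and HMobStore (see the FLUSH Renaming entry around 19:27:37,348 below), so cell values are written to a separate MOB file while the regular store file keeps only references. A hedged sketch of how such a family might be declared is below; the class name and the 10-byte threshold are assumptions chosen so that the small test cells seen above would exceed it, not values taken from the test.

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamily {
      // Assumed declaration of a MOB-enabled family "A": values larger than the
      // threshold are flushed into files under mobdir instead of the store file.
      public static ColumnFamilyDescriptor mobEnabledA() {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(10L)
            .build();
      }
    }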
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130917261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,263 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130917261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130917262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130917263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,348 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:37,351 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120f561771c8b0643288e7ede392e91dd2b_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f561771c8b0643288e7ede392e91dd2b_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:37,351 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9ad1702623c4462ca791c54d917c40cc, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:37,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9ad1702623c4462ca791c54d917c40cc is 175, key is test_row_0/A:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:37,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742513_1689 (size=31105) 2024-11-20T19:27:37,355 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9ad1702623c4462ca791c54d917c40cc 2024-11-20T19:27:37,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/acf77aed70f0425eb393302e3a9c99fd is 50, key is test_row_0/B:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:37,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742514_1690 (size=12151) 2024-11-20T19:27:37,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130917564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130917566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130917566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130917566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:37,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130917566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:37,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/acf77aed70f0425eb393302e3a9c99fd 2024-11-20T19:27:37,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9722129a01964b4893db669edc4813ee is 50, key is test_row_0/C:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:37,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742515_1691 (size=12151) 2024-11-20T19:27:38,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130918068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:38,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:38,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130918069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:38,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:38,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130918069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:38,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:38,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130918069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:38,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
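Once the flush further down completes ("Finished flush ... compaction requested=true"), each of the A, B and C stores holds three HFiles and the CompactSplit thread queues minor compactions ("Selecting compaction from 3 store files, 0 compacting, 3 eligible"). The trigger point is the store's minimum file count for compaction; the sketch below shows where that knob lives, with the value 3 assumed because it is the stock default and matches the selection messages that follow, not because the test's configuration is visible here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTriggerConf {
      // Assumed default: a store becomes eligible for minor compaction once it
      // holds at least this many HFiles.
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        return conf;
      }
    }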
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:38,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130918070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:38,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9722129a01964b4893db669edc4813ee 2024-11-20T19:27:38,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/9ad1702623c4462ca791c54d917c40cc as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc 2024-11-20T19:27:38,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc, entries=150, sequenceid=172, filesize=30.4 K 2024-11-20T19:27:38,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/acf77aed70f0425eb393302e3a9c99fd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/acf77aed70f0425eb393302e3a9c99fd 2024-11-20T19:27:38,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/acf77aed70f0425eb393302e3a9c99fd, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T19:27:38,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9722129a01964b4893db669edc4813ee as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9722129a01964b4893db669edc4813ee 2024-11-20T19:27:38,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9722129a01964b4893db669edc4813ee, entries=150, sequenceid=172, filesize=11.9 K 2024-11-20T19:27:38,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for d8a34887157cb9c8687afaaeab650abb in 1245ms, sequenceid=172, compaction requested=true 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:38,182 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:38,182 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:38,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 93523 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:38,183 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:38,183 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:38,183 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b394a62bfc3c4cbfb3389f6405706d4f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=91.3 K 2024-11-20T19:27:38,183 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7c779ff7f0974a18b630560723794d13, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/af7d6eaa9d184d2f8f79e2756dd52230, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/acf77aed70f0425eb393302e3a9c99fd] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.8 K 2024-11-20T19:27:38,183 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b394a62bfc3c4cbfb3389f6405706d4f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc] 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c779ff7f0974a18b630560723794d13, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130853040 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b394a62bfc3c4cbfb3389f6405706d4f, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130853040 2024-11-20T19:27:38,183 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting af7d6eaa9d184d2f8f79e2756dd52230, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732130854198 2024-11-20T19:27:38,184 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting acf77aed70f0425eb393302e3a9c99fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130856327 2024-11-20T19:27:38,184 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca411eefc1154d348575047304d35eec, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732130854198 2024-11-20T19:27:38,184 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ad1702623c4462ca791c54d917c40cc, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130856327 2024-11-20T19:27:38,187 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:38,188 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#588 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:38,188 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/ec8d164912f041cd964137ce86abf987 is 50, key is test_row_0/B:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:38,189 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112074854422b56c4ef0852c55eea61a547c_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:38,190 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112074854422b56c4ef0852c55eea61a547c_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:38,190 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112074854422b56c4ef0852c55eea61a547c_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:38,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742516_1692 (size=4469) 2024-11-20T19:27:38,194 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#589 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:38,194 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cbb01a60e839454a90f741ead74c984b is 175, key is test_row_0/A:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:38,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742517_1693 (size=12561) 2024-11-20T19:27:38,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742518_1694 (size=31515) 2024-11-20T19:27:38,634 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cbb01a60e839454a90f741ead74c984b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cbb01a60e839454a90f741ead74c984b 2024-11-20T19:27:38,634 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/ec8d164912f041cd964137ce86abf987 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ec8d164912f041cd964137ce86abf987 2024-11-20T19:27:38,638 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into cbb01a60e839454a90f741ead74c984b(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:38,638 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into ec8d164912f041cd964137ce86abf987(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:38,638 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130858182; duration=0sec 2024-11-20T19:27:38,638 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130858182; duration=0sec 2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:38,638 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:38,639 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:38,639 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:38,639 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:38,639 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/5259f8ff136542b693fd2b3a2a7e001f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/e7deae69d57940d5b5fa51243a4a50d7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9722129a01964b4893db669edc4813ee] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=35.8 K 2024-11-20T19:27:38,640 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 5259f8ff136542b693fd2b3a2a7e001f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732130853040 2024-11-20T19:27:38,640 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting e7deae69d57940d5b5fa51243a4a50d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732130854198 2024-11-20T19:27:38,640 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9722129a01964b4893db669edc4813ee, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130856327 2024-11-20T19:27:38,646 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#590 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:38,646 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/d77dfc76bfca44049052239b8b37baac is 50, key is test_row_0/C:col10/1732130856327/Put/seqid=0 2024-11-20T19:27:38,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742519_1695 (size=12561) 2024-11-20T19:27:38,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-20T19:27:38,740 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-20T19:27:38,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:38,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-20T19:27:38,742 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:38,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:27:38,742 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:38,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:38,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:27:38,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:38,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-20T19:27:38,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:38,894 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-20T19:27:38,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:38,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:38,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:38,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:38,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:38,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:38,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b8218e14c4bc4a6ea944e7e19cc9aa32_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130856954/Put/seqid=0 2024-11-20T19:27:38,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742520_1696 (size=12304) 2024-11-20T19:27:39,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:27:39,053 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/d77dfc76bfca44049052239b8b37baac as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/d77dfc76bfca44049052239b8b37baac 2024-11-20T19:27:39,055 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into d77dfc76bfca44049052239b8b37baac(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:39,056 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:39,056 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130858182; duration=0sec 2024-11-20T19:27:39,056 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:39,056 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:39,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:39,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:39,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130919081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130919083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130919083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130919084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130919084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130919185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130919186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130919186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130919188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130919188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:39,306 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b8218e14c4bc4a6ea944e7e19cc9aa32_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b8218e14c4bc4a6ea944e7e19cc9aa32_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:39,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/34ef6e9fde9d4f6cbac6ca01ba9774d3, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:39,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/34ef6e9fde9d4f6cbac6ca01ba9774d3 is 175, key is test_row_0/A:col10/1732130856954/Put/seqid=0 2024-11-20T19:27:39,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742521_1697 (size=31105) 2024-11-20T19:27:39,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:27:39,390 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130919388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130919389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130919389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130919390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130919391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130919691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130919691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130919692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130919693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:39,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130919695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:39,710 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/34ef6e9fde9d4f6cbac6ca01ba9774d3 2024-11-20T19:27:39,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/9cb87ca38e6c463c88296b555c5ad4a1 is 50, key is test_row_0/B:col10/1732130856954/Put/seqid=0 2024-11-20T19:27:39,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742522_1698 (size=12151) 2024-11-20T19:27:39,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:27:40,118 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/9cb87ca38e6c463c88296b555c5ad4a1 2024-11-20T19:27:40,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/68a139a3e7074bef85521fd421cb42c4 is 50, key is test_row_0/C:col10/1732130856954/Put/seqid=0 2024-11-20T19:27:40,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742523_1699 (size=12151) 2024-11-20T19:27:40,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130920194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:40,197 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130920196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:40,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:40,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130920198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:40,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:40,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130920199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130920199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:40,526 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/68a139a3e7074bef85521fd421cb42c4 2024-11-20T19:27:40,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/34ef6e9fde9d4f6cbac6ca01ba9774d3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3 2024-11-20T19:27:40,531 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3, entries=150, sequenceid=196, filesize=30.4 K 2024-11-20T19:27:40,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/9cb87ca38e6c463c88296b555c5ad4a1 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9cb87ca38e6c463c88296b555c5ad4a1 2024-11-20T19:27:40,534 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9cb87ca38e6c463c88296b555c5ad4a1, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T19:27:40,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/68a139a3e7074bef85521fd421cb42c4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/68a139a3e7074bef85521fd421cb42c4 2024-11-20T19:27:40,537 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/68a139a3e7074bef85521fd421cb42c4, entries=150, sequenceid=196, filesize=11.9 K 2024-11-20T19:27:40,537 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for d8a34887157cb9c8687afaaeab650abb in 1643ms, sequenceid=196, compaction requested=false 2024-11-20T19:27:40,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:40,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
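Note on the rejections above: the repeated RegionTooBusyException warnings come from HRegion.checkResources, which blocks a region's writes once its memstore grows past a blocking limit derived from the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; the flush that just finished (sequenceid=196, ~127.47 KB flushed in 1643ms) is what drains the memstore back under that limit. The sketch below only illustrates how a 512 KB limit can arise from those two properties; the concrete values are assumptions for illustration, not taken from the TestAcidGuarantees configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative per-region flush threshold (the shipped default is 128 MB);
            // the actual value used by this test run is not visible in the log.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Writes are rejected with RegionTooBusyException once the memstore reaches
            // flush.size * block.multiplier; with these assumed values, 128 KB * 4 = 512 KB,
            // which matches the "Over memstore limit=512.0 K" messages in the log above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
        }
    }

In the log, the rejections pause once this flush completes at 19:27:40,537 and resume around 19:27:41 because the workload refills the memstore while the next flush (pid=170) is still writing its store files.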
2024-11-20T19:27:40,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-20T19:27:40,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-20T19:27:40,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-20T19:27:40,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7960 sec 2024-11-20T19:27:40,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.7980 sec 2024-11-20T19:27:40,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-20T19:27:40,845 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-20T19:27:40,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-20T19:27:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:27:40,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:40,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:40,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:40,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:27:40,998 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:40,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
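For context on the procedure entries above: the test client asks the master to flush the table, the master stores a FlushTableProcedure (pid=169 here), fans it out as a per-region FlushRegionProcedure (pid=170), and the client polls MasterRpcServices ("Checking to see if procedure is done") until it completes. A minimal, hedged sketch of issuing the same kind of flush from the Java client follows; the connection setup is illustrative and not how the test's mini-cluster utility wires it up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Submits a flush request for every region of the table and waits for the
                // master-side procedure to finish -- the client-visible counterpart of the
                // "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: ...
                // completed" entry in the log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The subsequent "Checking to see if procedure is done pid=169" DEBUG lines are that same client-side polling, which ends once the region flush (pid=170) reports success back to the master.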
2024-11-20T19:27:40,999 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:41,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112086b1c496e7c14b62b7a982ffdf4efad8_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130859083/Put/seqid=0 2024-11-20T19:27:41,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742524_1700 (size=12304) 2024-11-20T19:27:41,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:27:41,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:41,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:41,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130921212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130921212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130921213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130921214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130921215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130921315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130921316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130921316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130921317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130921318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:41,409 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112086b1c496e7c14b62b7a982ffdf4efad8_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112086b1c496e7c14b62b7a982ffdf4efad8_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:41,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cac3d81ddf9945b49e2e657318105646, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:41,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cac3d81ddf9945b49e2e657318105646 is 175, key is test_row_0/A:col10/1732130859083/Put/seqid=0 2024-11-20T19:27:41,413 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742525_1701 (size=31105) 2024-11-20T19:27:41,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:27:41,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130921518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130921518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130921519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130921520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130921521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,814 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cac3d81ddf9945b49e2e657318105646 2024-11-20T19:27:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/577e70482d884e1792987ee08e513b37 is 50, key is test_row_0/B:col10/1732130859083/Put/seqid=0 2024-11-20T19:27:41,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742526_1702 (size=12151) 2024-11-20T19:27:41,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130921821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130921822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130921823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130921823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:41,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130921827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:41,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:27:42,221 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/577e70482d884e1792987ee08e513b37 2024-11-20T19:27:42,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/3fe9b634c7c042ba95879dd80888bdfb is 50, key is test_row_0/C:col10/1732130859083/Put/seqid=0 2024-11-20T19:27:42,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742527_1703 (size=12151) 2024-11-20T19:27:42,228 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/3fe9b634c7c042ba95879dd80888bdfb 2024-11-20T19:27:42,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cac3d81ddf9945b49e2e657318105646 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646 2024-11-20T19:27:42,233 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646, entries=150, sequenceid=212, filesize=30.4 K 2024-11-20T19:27:42,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/577e70482d884e1792987ee08e513b37 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/577e70482d884e1792987ee08e513b37 2024-11-20T19:27:42,236 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/577e70482d884e1792987ee08e513b37, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T19:27:42,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/3fe9b634c7c042ba95879dd80888bdfb as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3fe9b634c7c042ba95879dd80888bdfb 2024-11-20T19:27:42,239 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3fe9b634c7c042ba95879dd80888bdfb, entries=150, sequenceid=212, filesize=11.9 K 2024-11-20T19:27:42,240 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for d8a34887157cb9c8687afaaeab650abb in 1241ms, sequenceid=212, compaction requested=true 2024-11-20T19:27:42,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:42,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:42,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-20T19:27:42,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-20T19:27:42,242 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-20T19:27:42,242 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3930 sec 2024-11-20T19:27:42,243 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.3960 sec 2024-11-20T19:27:42,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:42,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-20T19:27:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:42,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:42,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:42,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bb4a810de1034b7bafe1c375f767c198_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:42,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742528_1704 (size=12304) 2024-11-20T19:27:42,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130922336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130922337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130922338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130922338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130922339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130922442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130922442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130922442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130922442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130922443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130922644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130922645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130922645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130922645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130922646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,738 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:42,740 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bb4a810de1034b7bafe1c375f767c198_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bb4a810de1034b7bafe1c375f767c198_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:42,741 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/98579d4cf8bb427db89a71eeff920377, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:42,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/98579d4cf8bb427db89a71eeff920377 is 175, key is test_row_0/A:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:42,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742529_1705 (size=31105) 2024-11-20T19:27:42,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130922946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130922947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-20T19:27:42,950 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-20T19:27:42,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:42,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-20T19:27:42,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130922949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130922950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T19:27:42,952 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:42,952 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:42,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:42,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:42,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130922951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T19:27:43,104 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T19:27:43,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:43,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
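For context: the repeated RegionTooBusyException entries above come from HRegion.checkResources, which rejects writes once the region's memstore passes its blocking limit (512 K in this test) until the in-flight flush drains it; the intended client behaviour is to back off and retry. Below is a minimal, hypothetical Java sketch of such a retry loop against this test table. It assumes client-side retries are effectively disabled (e.g. hbase.client.retries.number set to 1) so the exception reaches the caller directly rather than being retried or wrapped by the stock client.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          // May throw RegionTooBusyException while the memstore is over its blocking limit.
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Back off and retry once the flush has had a chance to drain the memstore.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}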
2024-11-20T19:27:43,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,145 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/98579d4cf8bb427db89a71eeff920377 2024-11-20T19:27:43,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/0956740f07324b2491276cf4a27b4457 is 50, key is test_row_0/B:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:43,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742530_1706 (size=12151) 2024-11-20T19:27:43,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T19:27:43,256 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T19:27:43,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:43,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T19:27:43,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:43,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130923449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:43,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130923453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:43,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:43,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130923453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130923453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:43,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130923454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/0956740f07324b2491276cf4a27b4457 2024-11-20T19:27:43,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T19:27:43,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/eec5c84746ee460d9f72e08375ea17fc is 50, key is test_row_0/C:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:43,560 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742531_1707 (size=12151) 2024-11-20T19:27:43,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/eec5c84746ee460d9f72e08375ea17fc 2024-11-20T19:27:43,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T19:27:43,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:43,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
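For context: pid=171/172 above is a master-driven FlushTableProcedure whose FlushRegionCallable keeps failing with "Unable to complete flush" because MemStoreFlusher.0 already has a flush running for the region ("NOT flushing ... as already flushing"); the master simply re-dispatches the callable until the in-flight flush finishes. A minimal sketch of the client call that starts such a procedure (the "Client=jenkins ... flush TestAcidGuarantees" requests above), assuming a reachable cluster:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush-table request to the master; if a memstore flush is already running for a
      // region, the dispatched FlushRegionCallable fails and the procedure is retried, as in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}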
2024-11-20T19:27:43,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:43,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/98579d4cf8bb427db89a71eeff920377 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377 2024-11-20T19:27:43,567 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377, entries=150, sequenceid=235, filesize=30.4 K 2024-11-20T19:27:43,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/0956740f07324b2491276cf4a27b4457 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/0956740f07324b2491276cf4a27b4457 2024-11-20T19:27:43,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/0956740f07324b2491276cf4a27b4457, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T19:27:43,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/eec5c84746ee460d9f72e08375ea17fc as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/eec5c84746ee460d9f72e08375ea17fc 2024-11-20T19:27:43,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/eec5c84746ee460d9f72e08375ea17fc, entries=150, sequenceid=235, filesize=11.9 K 2024-11-20T19:27:43,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d8a34887157cb9c8687afaaeab650abb in 1245ms, sequenceid=235, compaction requested=true 2024-11-20T19:27:43,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:43,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:43,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:43,574 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:43,574 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:43,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:43,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:43,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:43,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124830 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:43,576 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
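For context: once the flush commits the three .tmp files into stores A, B and C, the flusher marks all three stores for compaction and the ExploringCompactionPolicy selects 4 eligible files per store. The thresholds involved are governed by a handful of memstore and compaction settings; the sketch below lists the relevant keys with hypothetical default-like values for illustration only (the test itself runs with a 512 K blocking limit, not these values):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a memstore once it reaches this size.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Block writes (RegionTooBusyException) at flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Request a minor compaction once a store has this many files.
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // The "16 blocking" figure in the compaction-selection lines above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("memstore blocking limit (bytes): " + blockingLimit);
  }
}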
2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:43,576 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,576 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ec8d164912f041cd964137ce86abf987, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9cb87ca38e6c463c88296b555c5ad4a1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/577e70482d884e1792987ee08e513b37, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/0956740f07324b2491276cf4a27b4457] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=47.9 K 2024-11-20T19:27:43,576 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cbb01a60e839454a90f741ead74c984b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=121.9 K 2024-11-20T19:27:43,576 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cbb01a60e839454a90f741ead74c984b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377] 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting ec8d164912f041cd964137ce86abf987, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130856327 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbb01a60e839454a90f741ead74c984b, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130856327 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34ef6e9fde9d4f6cbac6ca01ba9774d3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732130856949 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cb87ca38e6c463c88296b555c5ad4a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732130856949 2024-11-20T19:27:43,576 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cac3d81ddf9945b49e2e657318105646, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732130859079 2024-11-20T19:27:43,577 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 577e70482d884e1792987ee08e513b37, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732130859079 2024-11-20T19:27:43,577 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98579d4cf8bb427db89a71eeff920377, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732130861211 2024-11-20T19:27:43,577 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0956740f07324b2491276cf4a27b4457, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732130861211 2024-11-20T19:27:43,582 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:43,583 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#601 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:43,584 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/efe72fde2cd24ce2baf2610d19f27d3d is 50, key is test_row_0/B:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:43,585 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120b2a75e2e0f014e2fa0d11d842ec786a8_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:43,587 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120b2a75e2e0f014e2fa0d11d842ec786a8_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:43,588 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120b2a75e2e0f014e2fa0d11d842ec786a8_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742532_1708 (size=12697) 2024-11-20T19:27:43,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742533_1709 (size=4469) 2024-11-20T19:27:43,601 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#600 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:43,602 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b is 175, key is test_row_0/A:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:43,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742534_1710 (size=31651) 2024-11-20T19:27:43,608 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b 2024-11-20T19:27:43,611 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into cf7d2e5a56ce45ed83c6e3f9e0322e3b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:43,611 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:43,611 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=12, startTime=1732130863574; duration=0sec 2024-11-20T19:27:43,611 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:43,611 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:43,611 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-20T19:27:43,612 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-20T19:27:43,612 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:43,612 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:43,612 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/d77dfc76bfca44049052239b8b37baac, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/68a139a3e7074bef85521fd421cb42c4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3fe9b634c7c042ba95879dd80888bdfb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/eec5c84746ee460d9f72e08375ea17fc] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=47.9 K 2024-11-20T19:27:43,613 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d77dfc76bfca44049052239b8b37baac, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732130856327 2024-11-20T19:27:43,613 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68a139a3e7074bef85521fd421cb42c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732130856949 2024-11-20T19:27:43,614 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fe9b634c7c042ba95879dd80888bdfb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1732130859079 2024-11-20T19:27:43,614 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting eec5c84746ee460d9f72e08375ea17fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732130861211 2024-11-20T19:27:43,620 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#602 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:43,620 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/136ca4d59b62440f94ce7840e7737f26 is 50, key is test_row_0/C:col10/1732130861211/Put/seqid=0 2024-11-20T19:27:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742535_1711 (size=12697) 2024-11-20T19:27:43,626 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/136ca4d59b62440f94ce7840e7737f26 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/136ca4d59b62440f94ce7840e7737f26 2024-11-20T19:27:43,629 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into 136ca4d59b62440f94ce7840e7737f26(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:43,629 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:43,629 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=12, startTime=1732130863575; duration=0sec 2024-11-20T19:27:43,629 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:43,629 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:43,714 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:43,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:43,715 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:43,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:43,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a053a58893a14dcf86538d476247ce3e_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130862337/Put/seqid=0 2024-11-20T19:27:43,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742536_1712 (size=12304) 2024-11-20T19:27:43,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:43,728 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120a053a58893a14dcf86538d476247ce3e_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a053a58893a14dcf86538d476247ce3e_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:43,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/e020761fb1c34adf93a2ff10230ca408, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:43,729 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/e020761fb1c34adf93a2ff10230ca408 is 175, key is test_row_0/A:col10/1732130862337/Put/seqid=0 2024-11-20T19:27:43,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742537_1713 (size=31105) 2024-11-20T19:27:43,733 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/e020761fb1c34adf93a2ff10230ca408 2024-11-20T19:27:43,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/52710942317f4357812bf237601c43c5 is 50, key is test_row_0/B:col10/1732130862337/Put/seqid=0 2024-11-20T19:27:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742538_1714 (size=12151) 2024-11-20T19:27:44,003 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/efe72fde2cd24ce2baf2610d19f27d3d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/efe72fde2cd24ce2baf2610d19f27d3d 2024-11-20T19:27:44,006 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into efe72fde2cd24ce2baf2610d19f27d3d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:44,006 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:44,006 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=12, startTime=1732130863574; duration=0sec 2024-11-20T19:27:44,006 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:44,006 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:44,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T19:27:44,142 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/52710942317f4357812bf237601c43c5 2024-11-20T19:27:44,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/03d3a06bd77744388a4dce656f21060b is 50, key is test_row_0/C:col10/1732130862337/Put/seqid=0 2024-11-20T19:27:44,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742539_1715 (size=12151) 2024-11-20T19:27:44,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:44,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:44,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130924487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130924487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130924488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130924489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130924489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,549 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/03d3a06bd77744388a4dce656f21060b 2024-11-20T19:27:44,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/e020761fb1c34adf93a2ff10230ca408 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408 2024-11-20T19:27:44,557 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408, entries=150, sequenceid=250, filesize=30.4 K 2024-11-20T19:27:44,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/52710942317f4357812bf237601c43c5 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/52710942317f4357812bf237601c43c5 2024-11-20T19:27:44,563 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/52710942317f4357812bf237601c43c5, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T19:27:44,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/03d3a06bd77744388a4dce656f21060b as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/03d3a06bd77744388a4dce656f21060b 2024-11-20T19:27:44,571 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/03d3a06bd77744388a4dce656f21060b, entries=150, sequenceid=250, filesize=11.9 K 2024-11-20T19:27:44,572 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d8a34887157cb9c8687afaaeab650abb in 857ms, sequenceid=250, compaction requested=false 2024-11-20T19:27:44,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:44,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:44,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-20T19:27:44,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-20T19:27:44,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-20T19:27:44,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6220 sec 2024-11-20T19:27:44,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.6250 sec 2024-11-20T19:27:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:44,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-20T19:27:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:44,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130924610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130924610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130924614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cca2e0951bbb4679a99814b315096b6e_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:44,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130924614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130924623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742540_1716 (size=12454) 2024-11-20T19:27:44,653 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:44,656 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120cca2e0951bbb4679a99814b315096b6e_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cca2e0951bbb4679a99814b315096b6e_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:44,657 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d0eba6bf7db4457d9ad7327e54bc6aba, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:44,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d0eba6bf7db4457d9ad7327e54bc6aba is 175, key is test_row_0/A:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742541_1717 (size=31255) 2024-11-20T19:27:44,677 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d0eba6bf7db4457d9ad7327e54bc6aba 2024-11-20T19:27:44,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/4ea308f62788475b81f330ef6f4ca7a8 is 50, key is 
test_row_0/B:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:44,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742542_1718 (size=12301) 2024-11-20T19:27:44,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/4ea308f62788475b81f330ef6f4ca7a8 2024-11-20T19:27:44,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130924718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130924719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130924720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/b3353bedb5a24e9fa94bd6b349c52ef7 is 50, key is test_row_0/C:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:44,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130924722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130924727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742543_1719 (size=12301) 2024-11-20T19:27:44,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130924922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130924922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130924924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130924926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:44,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:44,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130924931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-20T19:27:45,058 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-20T19:27:45,059 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:45,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-20T19:27:45,060 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:45,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:27:45,062 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:45,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:45,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/b3353bedb5a24e9fa94bd6b349c52ef7 2024-11-20T19:27:45,150 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d0eba6bf7db4457d9ad7327e54bc6aba as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba 2024-11-20T19:27:45,154 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba, entries=150, sequenceid=276, filesize=30.5 K 2024-11-20T19:27:45,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/4ea308f62788475b81f330ef6f4ca7a8 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/4ea308f62788475b81f330ef6f4ca7a8 2024-11-20T19:27:45,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/4ea308f62788475b81f330ef6f4ca7a8, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T19:27:45,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/b3353bedb5a24e9fa94bd6b349c52ef7 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/b3353bedb5a24e9fa94bd6b349c52ef7 2024-11-20T19:27:45,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:27:45,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/b3353bedb5a24e9fa94bd6b349c52ef7, entries=150, sequenceid=276, filesize=12.0 K 2024-11-20T19:27:45,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for d8a34887157cb9c8687afaaeab650abb in 570ms, sequenceid=276, compaction requested=true 2024-11-20T19:27:45,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:45,166 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:45,167 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94011 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:45,167 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:45,167 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:45,167 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=91.8 K 2024-11-20T19:27:45,168 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:45,168 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba] 2024-11-20T19:27:45,168 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf7d2e5a56ce45ed83c6e3f9e0322e3b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732130861211 2024-11-20T19:27:45,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:45,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:45,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:45,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:45,168 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:45,168 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e020761fb1c34adf93a2ff10230ca408, keycount=150, bloomtype=ROW, size=30.4 K, 
encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130862336 2024-11-20T19:27:45,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:45,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:45,169 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0eba6bf7db4457d9ad7327e54bc6aba, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130864487 2024-11-20T19:27:45,170 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:45,170 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:45,170 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:45,170 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/efe72fde2cd24ce2baf2610d19f27d3d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/52710942317f4357812bf237601c43c5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/4ea308f62788475b81f330ef6f4ca7a8] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.3 K 2024-11-20T19:27:45,171 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting efe72fde2cd24ce2baf2610d19f27d3d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732130861211 2024-11-20T19:27:45,171 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 52710942317f4357812bf237601c43c5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130862336 2024-11-20T19:27:45,171 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ea308f62788475b81f330ef6f4ca7a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130864487 2024-11-20T19:27:45,179 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:45,192 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d8a34887157cb9c8687afaaeab650abb#B#compaction#610 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:45,193 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/c6cf6300525548d59168322bf1419399 is 50, key is test_row_0/B:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:45,207 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411203f6c72458227416c8bbbdae8c8e28358_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:45,209 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411203f6c72458227416c8bbbdae8c8e28358_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:45,209 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411203f6c72458227416c8bbbdae8c8e28358_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:45,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-20T19:27:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:45,215 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-20T19:27:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:45,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:45,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:45,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:45,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742544_1720 (size=12949) 2024-11-20T19:27:45,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120917246d1bf7b47eba8102d20b46274d6_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130864613/Put/seqid=0 2024-11-20T19:27:45,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742545_1721 (size=4469) 2024-11-20T19:27:45,255 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#609 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:45,256 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/05458811f80c468f84be29119559d88e is 175, key is test_row_0/A:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:45,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130925250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130925254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130925255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130925256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130925256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742546_1722 (size=12454) 2024-11-20T19:27:45,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742547_1723 (size=31903) 2024-11-20T19:27:45,313 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/05458811f80c468f84be29119559d88e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/05458811f80c468f84be29119559d88e 2024-11-20T19:27:45,322 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into 05458811f80c468f84be29119559d88e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:45,322 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:45,322 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130865166; duration=0sec 2024-11-20T19:27:45,322 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:45,322 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:45,322 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:45,323 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:45,323 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:45,323 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:45,323 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/136ca4d59b62440f94ce7840e7737f26, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/03d3a06bd77744388a4dce656f21060b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/b3353bedb5a24e9fa94bd6b349c52ef7] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.3 K 2024-11-20T19:27:45,324 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 136ca4d59b62440f94ce7840e7737f26, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732130861211 2024-11-20T19:27:45,324 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03d3a06bd77744388a4dce656f21060b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732130862336 2024-11-20T19:27:45,325 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3353bedb5a24e9fa94bd6b349c52ef7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130864487 2024-11-20T19:27:45,331 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#612 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:45,332 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/35dcf9f634e44e66b31bd72028b470a4 is 50, key is test_row_0/C:col10/1732130864487/Put/seqid=0 2024-11-20T19:27:45,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130925357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:27:45,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130925359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130925361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130925361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130925362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742548_1724 (size=12949) 2024-11-20T19:27:45,386 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/35dcf9f634e44e66b31bd72028b470a4 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/35dcf9f634e44e66b31bd72028b470a4 2024-11-20T19:27:45,391 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into 35dcf9f634e44e66b31bd72028b470a4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:45,391 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:45,391 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130865168; duration=0sec 2024-11-20T19:27:45,391 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:45,391 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:45,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130925560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130925564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130925565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130925565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130925567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,646 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/c6cf6300525548d59168322bf1419399 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c6cf6300525548d59168322bf1419399 2024-11-20T19:27:45,653 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into c6cf6300525548d59168322bf1419399(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:45,653 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:45,653 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130865168; duration=0sec 2024-11-20T19:27:45,653 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:45,654 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:45,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:27:45,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:45,677 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120917246d1bf7b47eba8102d20b46274d6_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120917246d1bf7b47eba8102d20b46274d6_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:45,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/dd42fbb007854bb6856a6de2eac4214d, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:45,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/dd42fbb007854bb6856a6de2eac4214d is 175, key is test_row_0/A:col10/1732130864613/Put/seqid=0 2024-11-20T19:27:45,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742549_1725 (size=31255) 2024-11-20T19:27:45,704 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/dd42fbb007854bb6856a6de2eac4214d 2024-11-20T19:27:45,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/bc93124e60fe4390a978af6d0742903a is 50, key is test_row_0/B:col10/1732130864613/Put/seqid=0 2024-11-20T19:27:45,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742550_1726 (size=12301) 2024-11-20T19:27:45,745 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/bc93124e60fe4390a978af6d0742903a 2024-11-20T19:27:45,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/0b8d6c4cefa346c684a20aa054f77a8a is 50, key is test_row_0/C:col10/1732130864613/Put/seqid=0 2024-11-20T19:27:45,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742551_1727 (size=12301) 2024-11-20T19:27:45,785 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/0b8d6c4cefa346c684a20aa054f77a8a 2024-11-20T19:27:45,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/dd42fbb007854bb6856a6de2eac4214d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d 2024-11-20T19:27:45,795 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d, entries=150, sequenceid=288, filesize=30.5 K 2024-11-20T19:27:45,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/bc93124e60fe4390a978af6d0742903a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bc93124e60fe4390a978af6d0742903a 2024-11-20T19:27:45,802 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 
{event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bc93124e60fe4390a978af6d0742903a, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T19:27:45,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/0b8d6c4cefa346c684a20aa054f77a8a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0b8d6c4cefa346c684a20aa054f77a8a 2024-11-20T19:27:45,808 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0b8d6c4cefa346c684a20aa054f77a8a, entries=150, sequenceid=288, filesize=12.0 K 2024-11-20T19:27:45,809 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d8a34887157cb9c8687afaaeab650abb in 594ms, sequenceid=288, compaction requested=false 2024-11-20T19:27:45,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:45,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:45,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-20T19:27:45,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-20T19:27:45,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-20T19:27:45,813 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 750 msec 2024-11-20T19:27:45,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 754 msec 2024-11-20T19:27:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:45,866 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-20T19:27:45,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:45,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:45,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:45,866 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:45,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d30266a988ef4e6ea47cefbc71e7b8ae_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:45,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130925877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130925877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130925877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130925878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130925878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742552_1728 (size=14994) 2024-11-20T19:27:45,906 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:45,909 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d30266a988ef4e6ea47cefbc71e7b8ae_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d30266a988ef4e6ea47cefbc71e7b8ae_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:45,911 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d75d6367b28c434f89859a42efc5e6a0, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:45,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d75d6367b28c434f89859a42efc5e6a0 is 175, key is test_row_0/A:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:45,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 
is added to blk_1073742553_1729 (size=39949) 2024-11-20T19:27:45,920 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d75d6367b28c434f89859a42efc5e6a0 2024-11-20T19:27:45,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/20f4ca4086ee45f3b7abb4facb1b9cdf is 50, key is test_row_0/B:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:45,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742554_1730 (size=12301) 2024-11-20T19:27:45,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130925982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130925982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130925983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130925983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:45,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:45,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130925984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-20T19:27:46,164 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-20T19:27:46,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:46,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-20T19:27:46,167 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T19:27:46,168 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:46,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:46,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130926186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130926187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130926188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130926188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130926189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T19:27:46,319 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T19:27:46,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:46,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:46,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:46,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:46,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/20f4ca4086ee45f3b7abb4facb1b9cdf 2024-11-20T19:27:46,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/37e6bbabfae04ea2869fab0dbd4e699a is 50, key is test_row_0/C:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:46,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742555_1731 (size=12301) 2024-11-20T19:27:46,440 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/37e6bbabfae04ea2869fab0dbd4e699a 2024-11-20T19:27:46,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/d75d6367b28c434f89859a42efc5e6a0 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0 2024-11-20T19:27:46,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0, entries=200, sequenceid=316, filesize=39.0 K 2024-11-20T19:27:46,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/20f4ca4086ee45f3b7abb4facb1b9cdf as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/20f4ca4086ee45f3b7abb4facb1b9cdf 2024-11-20T19:27:46,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/20f4ca4086ee45f3b7abb4facb1b9cdf, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T19:27:46,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/37e6bbabfae04ea2869fab0dbd4e699a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/37e6bbabfae04ea2869fab0dbd4e699a 2024-11-20T19:27:46,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/37e6bbabfae04ea2869fab0dbd4e699a, entries=150, sequenceid=316, filesize=12.0 K 2024-11-20T19:27:46,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d8a34887157cb9c8687afaaeab650abb in 596ms, sequenceid=316, compaction requested=true 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:46,462 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:46,462 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:46,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:46,463 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:46,463 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:46,463 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/05458811f80c468f84be29119559d88e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=100.7 K 2024-11-20T19:27:46,463 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c6cf6300525548d59168322bf1419399, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bc93124e60fe4390a978af6d0742903a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/20f4ca4086ee45f3b7abb4facb1b9cdf] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.7 K 2024-11-20T19:27:46,463 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/05458811f80c468f84be29119559d88e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0] 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c6cf6300525548d59168322bf1419399, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130864487 2024-11-20T19:27:46,463 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05458811f80c468f84be29119559d88e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130864487 2024-11-20T19:27:46,464 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting bc93124e60fe4390a978af6d0742903a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732130864604 2024-11-20T19:27:46,464 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd42fbb007854bb6856a6de2eac4214d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732130864604 2024-11-20T19:27:46,464 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 20f4ca4086ee45f3b7abb4facb1b9cdf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732130865251 2024-11-20T19:27:46,464 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting d75d6367b28c434f89859a42efc5e6a0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732130865251 2024-11-20T19:27:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T19:27:46,471 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#618 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:46,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,472 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/d953174414674824981acc57b601fbfc is 50, key is test_row_0/B:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:46,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-20T19:27:46,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:46,472 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-20T19:27:46,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:46,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:46,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:46,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:46,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:46,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:46,473 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:46,483 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c8335d37e43f4c95bcf0825d203237e0_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:46,485 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c8335d37e43f4c95bcf0825d203237e0_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A 
region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:46,485 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c8335d37e43f4c95bcf0825d203237e0_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:46,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d72e8d7b0996480dae3cf11dc27a27e9_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130865877/Put/seqid=0 2024-11-20T19:27:46,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742557_1733 (size=4469) 2024-11-20T19:27:46,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742556_1732 (size=13051) 2024-11-20T19:27:46,521 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/d953174414674824981acc57b601fbfc as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d953174414674824981acc57b601fbfc 2024-11-20T19:27:46,529 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into d953174414674824981acc57b601fbfc(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
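The "Committing .tmp/... as ..." entries above reflect the usual store-file commit pattern: the flusher or compactor writes the new HFile under the region's .tmp directory and, once it is complete, renames it into the column-family directory. The following is a minimal, simplified sketch of that write-to-tmp-then-rename idea using the Hadoop FileSystem API; the class, helper name, and paths are hypothetical and this is not the HRegionFileSystem implementation itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified sketch: finish a file under .tmp, then move it into the
// store directory with a single rename. Hypothetical helper, not HBase code.
public class TmpCommitSketch {

  public static Path commitStoreFile(FileSystem fs, Path tmpFile, Path storeDir)
      throws IOException {
    Path dest = new Path(storeDir, tmpFile.getName());
    // A rename within one HDFS filesystem is atomic, so readers either see
    // the old set of store files or the new one, never a half-written file.
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + dest);
    }
    return dest;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical paths mirroring the layout seen in the log above.
    Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/B/storefile");
    Path storeDir = new Path("/data/default/TestAcidGuarantees/region/B");
    commitStoreFile(fs, tmp, storeDir);
  }
}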
2024-11-20T19:27:46,530 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:46,530 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130866462; duration=0sec 2024-11-20T19:27:46,530 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:46,530 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:46,530 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:46,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:46,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742558_1734 (size=12454) 2024-11-20T19:27:46,539 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:46,539 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:46,539 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:46,539 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/35dcf9f634e44e66b31bd72028b470a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0b8d6c4cefa346c684a20aa054f77a8a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/37e6bbabfae04ea2869fab0dbd4e699a] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.7 K 2024-11-20T19:27:46,540 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 35dcf9f634e44e66b31bd72028b470a4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1732130864487 2024-11-20T19:27:46,540 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b8d6c4cefa346c684a20aa054f77a8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732130864604 2024-11-20T19:27:46,540 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 37e6bbabfae04ea2869fab0dbd4e699a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732130865251 2024-11-20T19:27:46,548 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#621 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:46,549 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/7247f3d7dc084a0da05d8058e82ebd9a is 50, key is test_row_0/C:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:46,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130926551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130926553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130926554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130926555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130926559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742559_1735 (size=13051) 2024-11-20T19:27:46,581 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/7247f3d7dc084a0da05d8058e82ebd9a as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/7247f3d7dc084a0da05d8058e82ebd9a 2024-11-20T19:27:46,587 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into 7247f3d7dc084a0da05d8058e82ebd9a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
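The repeated RegionTooBusyException entries are the region server pushing back on writers while the region's memstore is over its limit; callers are expected to back off and retry until the in-flight flushes and compactions drain the memstore. The standard HBase client already retries this internally, so the following is only a minimal, hypothetical sketch of the backoff idea, assuming the exception surfaces directly to the caller and a table named TestAcidGuarantees with family A.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical writer that backs off when the region reports it is too busy.
public class BackoffPutSketch {

  public static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;                       // give up after maxAttempts tries
        }
        Thread.sleep(sleepMs);           // simple exponential backoff
        sleepMs = Math.min(sleepMs * 2, 5000);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 10);
    }
  }
}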
2024-11-20T19:27:46,587 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:46,587 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130866462; duration=0sec 2024-11-20T19:27:46,587 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:46,588 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:46,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130926657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130926660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130926660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130926661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130926663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T19:27:46,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130926860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130926863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130926865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130926865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:46,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130926867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:46,913 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#619 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:46,913 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/817114e16e904903a3c9159225473c8d is 175, key is test_row_0/A:col10/1732130865865/Put/seqid=0 2024-11-20T19:27:46,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:46,940 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120d72e8d7b0996480dae3cf11dc27a27e9_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d72e8d7b0996480dae3cf11dc27a27e9_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:46,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/420d352dd6cc412cba7577c4a22433d6, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:46,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/420d352dd6cc412cba7577c4a22433d6 is 175, key is test_row_0/A:col10/1732130865877/Put/seqid=0 2024-11-20T19:27:46,948 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742560_1736 (size=32005) 2024-11-20T19:27:46,953 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/817114e16e904903a3c9159225473c8d as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/817114e16e904903a3c9159225473c8d 2024-11-20T19:27:46,959 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into 817114e16e904903a3c9159225473c8d(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-20T19:27:46,959 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:46,959 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130866462; duration=0sec 2024-11-20T19:27:46,959 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:46,959 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:46,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742561_1737 (size=31255) 2024-11-20T19:27:46,970 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/420d352dd6cc412cba7577c4a22433d6 2024-11-20T19:27:46,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/c945fc435a284248ba9e695bd2545321 is 50, key is test_row_0/B:col10/1732130865877/Put/seqid=0 2024-11-20T19:27:46,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742562_1738 (size=12301) 2024-11-20T19:27:47,001 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/c945fc435a284248ba9e695bd2545321 2024-11-20T19:27:47,010 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9be1ba3991084cddb4a48ef149fde792 is 50, key is test_row_0/C:col10/1732130865877/Put/seqid=0 2024-11-20T19:27:47,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742563_1739 (size=12301) 2024-11-20T19:27:47,031 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9be1ba3991084cddb4a48ef149fde792 2024-11-20T19:27:47,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/420d352dd6cc412cba7577c4a22433d6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6 2024-11-20T19:27:47,044 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6, entries=150, sequenceid=327, filesize=30.5 K 2024-11-20T19:27:47,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/c945fc435a284248ba9e695bd2545321 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c945fc435a284248ba9e695bd2545321 2024-11-20T19:27:47,053 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c945fc435a284248ba9e695bd2545321, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T19:27:47,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9be1ba3991084cddb4a48ef149fde792 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9be1ba3991084cddb4a48ef149fde792 2024-11-20T19:27:47,060 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9be1ba3991084cddb4a48ef149fde792, entries=150, sequenceid=327, filesize=12.0 K 2024-11-20T19:27:47,061 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for d8a34887157cb9c8687afaaeab650abb in 589ms, sequenceid=327, compaction requested=false 2024-11-20T19:27:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-20T19:27:47,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-20T19:27:47,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-20T19:27:47,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 894 msec 2024-11-20T19:27:47,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 898 msec 2024-11-20T19:27:47,172 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-20T19:27:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:47,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:47,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:47,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130927174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130927176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130927177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130927178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130927179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205f7ee08aac7047e0a0502b64b3968eaf_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130867168/Put/seqid=0 2024-11-20T19:27:47,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742564_1740 (size=17534) 2024-11-20T19:27:47,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-20T19:27:47,270 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-20T19:27:47,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-20T19:27:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-20T19:27:47,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T19:27:47,273 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-20T19:27:47,274 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-20T19:27:47,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-20T19:27:47,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130927281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130927281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130927283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130927284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130927286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T19:27:47,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T19:27:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
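The records above show the region server rejecting Mutate calls with RegionTooBusyException once the region's memstore passes its blocking limit, while the flush requested by the master cannot start because one is already in progress. A minimal, hypothetical sketch of how a writer such as this test's client threads might tolerate those rejections by retrying the put with a short backoff (the table name, row key, and column family are taken from the log; the connection setup, retry count, and sleep interval are assumptions, not part of the test code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
    public static void main(String[] args) throws Exception {
        // Cluster settings come from hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // The region is above its blocking memstore size; back off and retry.
                    // Hypothetical limit of 10 attempts and a 200 ms sleep between them.
                    if (++attempts >= 10) {
                        throw e;
                    }
                    Thread.sleep(200L);
                }
            }
        }
    }
}

Note that the standard HBase client already retries this exception internally; the explicit loop here only makes the behaviour visible for the purpose of reading the log.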
2024-11-20T19:27:47,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130927486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130927486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130927486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130927486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130927490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T19:27:47,582 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T19:27:47,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:47,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
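The "Over memstore limit=512.0 K" figure in these warnings is the region's blocking memstore size, which HRegion.checkResources derives from the configured flush size and block multiplier (hbase.hregion.memstore.flush.size × hbase.hregion.memstore.block.multiplier); the test appears to run with a deliberately small flush size so the ceiling is reached quickly. A hedged sketch of the two settings involved, using illustrative values that merely reproduce a 512 KB limit and are not the values from this run's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: a 128 KB flush size with the default multiplier of 4
        // yields the 512 KB blocking limit reported in the RegionTooBusyException above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
            * conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        System.out.println("Blocking memstore size: " + blockingLimit + " bytes"); // 524288
    }
}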
2024-11-20T19:27:47,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:47,646 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411205f7ee08aac7047e0a0502b64b3968eaf_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205f7ee08aac7047e0a0502b64b3968eaf_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:47,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/116c8c1c92114318a3efc15030dfc047, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:47,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/116c8c1c92114318a3efc15030dfc047 is 175, key is test_row_0/A:col10/1732130867168/Put/seqid=0 2024-11-20T19:27:47,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742565_1741 (size=48639) 2024-11-20T19:27:47,670 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/116c8c1c92114318a3efc15030dfc047 2024-11-20T19:27:47,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/7ee3f725e38b4c3998b02bcfa7089b1e is 50, key is test_row_0/B:col10/1732130867168/Put/seqid=0 2024-11-20T19:27:47,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742566_1742 (size=12301) 2024-11-20T19:27:47,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/7ee3f725e38b4c3998b02bcfa7089b1e 2024-11-20T19:27:47,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/0654915018f541c28c2c4e05e61151fa is 50, key is test_row_0/C:col10/1732130867168/Put/seqid=0 
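Several of the flush records above go through DefaultMobStoreFlusher and rename files into the mobdir tree, which is the path taken when the column family is MOB-enabled. A hedged sketch of declaring such a family with the HBase 2.x descriptor builders (the table and family names mirror the log; the MOB threshold value is an assumption chosen for illustration, not the one used by this test):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // MOB-enabled family "A": cells larger than the threshold are written to
            // MOB files under the mobdir tree instead of ordinary store files.
            ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(4L) // illustrative threshold in bytes, not the test's value
                .build();
            admin.createTable(TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(familyA)
                .build());
        }
    }
}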
2024-11-20T19:27:47,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T19:27:47,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:47,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
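The FlushTableProcedure (pid=175, then pid=177) and the repeated FlushRegionCallable attempts above are what the master runs in response to a client-side flush request, with the admin client polling "Checking to see if procedure is done" until the procedure finishes. A minimal sketch of issuing that request through the standard Admin API (connection setup is assumed; the table name comes from the log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table; the master drives it as a
            // FlushTableProcedure with one FlushRegionProcedure per region, and the call
            // returns once the procedure completes (or throws if it fails).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}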
2024-11-20T19:27:47,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742567_1743 (size=12301) 2024-11-20T19:27:47,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130927788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130927789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130927789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130927792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:47,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130927794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T19:27:47,888 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:47,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T19:27:47,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:47,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:47,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:47,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,040 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T19:27:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. as already flushing 2024-11-20T19:27:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:48,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-20T19:27:48,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/0654915018f541c28c2c4e05e61151fa 2024-11-20T19:27:48,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/116c8c1c92114318a3efc15030dfc047 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047 2024-11-20T19:27:48,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047, entries=250, sequenceid=358, filesize=47.5 K 2024-11-20T19:27:48,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/7ee3f725e38b4c3998b02bcfa7089b1e as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7ee3f725e38b4c3998b02bcfa7089b1e 2024-11-20T19:27:48,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7ee3f725e38b4c3998b02bcfa7089b1e, entries=150, 
sequenceid=358, filesize=12.0 K 2024-11-20T19:27:48,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/0654915018f541c28c2c4e05e61151fa as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0654915018f541c28c2c4e05e61151fa 2024-11-20T19:27:48,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0654915018f541c28c2c4e05e61151fa, entries=150, sequenceid=358, filesize=12.0 K 2024-11-20T19:27:48,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for d8a34887157cb9c8687afaaeab650abb in 998ms, sequenceid=358, compaction requested=true 2024-11-20T19:27:48,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:48,169 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:48,170 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:48,170 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:48,170 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:48,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:48,170 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
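Note: the ExploringCompactionPolicy line above reports that all 3 A-store files passed the size-ratio test ("1 in ratio"). As a rough, standalone illustration of that kind of check (not HBase's actual implementation, which also enumerates permutations and enforces min/max file counts and limits), a candidate set is "in ratio" when no file is larger than the ratio times the combined size of the other files:

    import java.util.List;

    public class RatioCheckSketch {
        // Simplified sketch: every file must be <= ratio * (sum of the other files' sizes).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes approximating the three A-store files selected above (total = 111899 bytes).
            List<Long> candidate = List.of(32_051L, 31_232L, 48_616L);
            System.out.println(filesInRatio(candidate, 1.2)); // 1.2 is a typical compaction ratio; prints true
        }
    }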
2024-11-20T19:27:48,171 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/817114e16e904903a3c9159225473c8d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=109.3 K 2024-11-20T19:27:48,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:48,171 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:48,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:48,171 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/817114e16e904903a3c9159225473c8d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047] 2024-11-20T19:27:48,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:48,171 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:48,171 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:48,171 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:48,171 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d953174414674824981acc57b601fbfc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c945fc435a284248ba9e695bd2545321, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7ee3f725e38b4c3998b02bcfa7089b1e] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.8 K 2024-11-20T19:27:48,171 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 817114e16e904903a3c9159225473c8d, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732130865251 2024-11-20T19:27:48,171 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting d953174414674824981acc57b601fbfc, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732130865251 2024-11-20T19:27:48,172 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 420d352dd6cc412cba7577c4a22433d6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732130865869 2024-11-20T19:27:48,172 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting c945fc435a284248ba9e695bd2545321, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732130865869 2024-11-20T19:27:48,172 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting 116c8c1c92114318a3efc15030dfc047, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732130866553 2024-11-20T19:27:48,172 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ee3f725e38b4c3998b02bcfa7089b1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732130866554 2024-11-20T19:27:48,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:48,194 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:48,196 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#627 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:48,197 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/71de39b8af6744e2ac62b562c85f0683 is 50, key is test_row_0/B:col10/1732130867168/Put/seqid=0 2024-11-20T19:27:48,205 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:48,235 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120bc0adca34c014d2b8834db03bb22ebd4_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:48,238 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120bc0adca34c014d2b8834db03bb22ebd4_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:48,238 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120bc0adca34c014d2b8834db03bb22ebd4_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:48,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742568_1744 (size=13153) 2024-11-20T19:27:48,245 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/71de39b8af6744e2ac62b562c85f0683 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/71de39b8af6744e2ac62b562c85f0683 2024-11-20T19:27:48,250 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into 71de39b8af6744e2ac62b562c85f0683(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
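Note: the flush that pid=178 keeps retrying in this section is driven by a master procedure dispatched to the region server. From a client, a comparable flush or compaction of the same table can be requested through the Admin API; a minimal sketch follows (the table name is taken from this log, while the connection settings and whether the request travels through the same master procedure path as pid=177/178 depend on the cluster and HBase version, so treat the correspondence as approximate):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushCompactSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                admin.flush(table);    // request a memstore flush for the table's regions
                admin.compact(table);  // queue a minor compaction, like the ones logged here
            }
        }
    }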
2024-11-20T19:27:48,250 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:48,250 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130868170; duration=0sec 2024-11-20T19:27:48,251 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:48,251 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:48,251 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:48,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112044ae90fa152e446db3a66d04bc8f8601_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130867177/Put/seqid=0 2024-11-20T19:27:48,252 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:48,252 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:48,252 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
2024-11-20T19:27:48,252 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/7247f3d7dc084a0da05d8058e82ebd9a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9be1ba3991084cddb4a48ef149fde792, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0654915018f541c28c2c4e05e61151fa] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.8 K 2024-11-20T19:27:48,252 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 7247f3d7dc084a0da05d8058e82ebd9a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732130865251 2024-11-20T19:27:48,253 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9be1ba3991084cddb4a48ef149fde792, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732130865869 2024-11-20T19:27:48,253 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 0654915018f541c28c2c4e05e61151fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732130866554 2024-11-20T19:27:48,282 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#630 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:48,283 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/3381aa4e1dc8446d8dfd8c4d83918269 is 50, key is test_row_0/C:col10/1732130867168/Put/seqid=0 2024-11-20T19:27:48,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742569_1745 (size=4469) 2024-11-20T19:27:48,292 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#628 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:48,293 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/e2e8a689fc164b7ba26e281851df8abd is 175, key is test_row_0/A:col10/1732130867168/Put/seqid=0 2024-11-20T19:27:48,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
as already flushing 2024-11-20T19:27:48,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:48,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742570_1746 (size=12454) 2024-11-20T19:27:48,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:48,313 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112044ae90fa152e446db3a66d04bc8f8601_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044ae90fa152e446db3a66d04bc8f8601_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:48,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f07e7a50c4cd4994a28574bbd2ffe86f, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:48,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f07e7a50c4cd4994a28574bbd2ffe86f is 175, key is test_row_0/A:col10/1732130867177/Put/seqid=0 2024-11-20T19:27:48,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742571_1747 (size=13153) 2024-11-20T19:27:48,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130928322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130928324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,331 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/3381aa4e1dc8446d8dfd8c4d83918269 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3381aa4e1dc8446d8dfd8c4d83918269 2024-11-20T19:27:48,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130928324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130928328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,332 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130928328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742572_1748 (size=32107) 2024-11-20T19:27:48,342 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into 3381aa4e1dc8446d8dfd8c4d83918269(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:48,342 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:48,342 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130868171; duration=0sec 2024-11-20T19:27:48,342 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:48,342 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:48,346 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/e2e8a689fc164b7ba26e281851df8abd as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e2e8a689fc164b7ba26e281851df8abd 2024-11-20T19:27:48,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742573_1749 (size=31255) 2024-11-20T19:27:48,349 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f07e7a50c4cd4994a28574bbd2ffe86f 2024-11-20T19:27:48,352 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into e2e8a689fc164b7ba26e281851df8abd(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
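Note: the repeated RegionTooBusyException "Over memstore limit=512.0 K" entries throughout this section come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking limit, i.e. the configured flush size multiplied by the block multiplier. A hypothetical test-scale configuration consistent with the 512 K limit seen here (the actual values used by this run are not shown in the excerpt):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush size
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // assumed default multiplier of 4
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 bytes = 512 K
        }
    }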
2024-11-20T19:27:48,352 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:48,352 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130868169; duration=0sec 2024-11-20T19:27:48,352 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:48,352 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:48,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/8ea84bcb3da34a2082dc085c42035d28 is 50, key is test_row_0/B:col10/1732130867177/Put/seqid=0 2024-11-20T19:27:48,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T19:27:48,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742574_1750 (size=12301) 2024-11-20T19:27:48,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130928429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130928433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130928433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130928434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130928434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130928632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130928635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130928636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130928637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130928637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,793 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/8ea84bcb3da34a2082dc085c42035d28 2024-11-20T19:27:48,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9265f2c75bf34e5f9e1f6492a0466b95 is 50, key is test_row_0/C:col10/1732130867177/Put/seqid=0 2024-11-20T19:27:48,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742575_1751 (size=12301) 2024-11-20T19:27:48,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53588 deadline: 1732130928935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130928938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53642 deadline: 1732130928938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53552 deadline: 1732130928939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:48,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:48,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53614 deadline: 1732130928940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:49,232 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9265f2c75bf34e5f9e1f6492a0466b95 2024-11-20T19:27:49,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f07e7a50c4cd4994a28574bbd2ffe86f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f 2024-11-20T19:27:49,241 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f, entries=150, sequenceid=366, filesize=30.5 K 2024-11-20T19:27:49,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/8ea84bcb3da34a2082dc085c42035d28 as 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/8ea84bcb3da34a2082dc085c42035d28 2024-11-20T19:27:49,249 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/8ea84bcb3da34a2082dc085c42035d28, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T19:27:49,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/9265f2c75bf34e5f9e1f6492a0466b95 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9265f2c75bf34e5f9e1f6492a0466b95 2024-11-20T19:27:49,257 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9265f2c75bf34e5f9e1f6492a0466b95, entries=150, sequenceid=366, filesize=12.0 K 2024-11-20T19:27:49,257 INFO [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for d8a34887157cb9c8687afaaeab650abb in 1063ms, sequenceid=366, compaction requested=false 2024-11-20T19:27:49,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:49,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
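Note on the RegionTooBusyException bursts above: "Over memstore limit=512.0 K" is HRegion.checkResources rejecting writes because the region's memstore has grown past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small 512 K bound suggests this test deliberately configures a tiny flush size so that back-pressure kicks in quickly. Writes are accepted again once the flush (pid=178 here) drains the memstore. The HBase client normally retries this exception on its own; a minimal client-side sketch with the retry knobs spelled out follows (retry counts, timeouts and the value are illustrative; only the table, row, family and qualifier names come from the log).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // RegionTooBusyException is retriable; these settings bound how long the client keeps trying.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100L);                 // base pause between retries, in ms
    conf.setLong("hbase.client.operation.timeout", 120000L);  // overall cap per operation, in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retries internally until the region accepts the write or the retry budget is exhausted
    }
  }
}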
2024-11-20T19:27:49,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/db9c3a6c6492:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-20T19:27:49,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-20T19:27:49,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-20T19:27:49,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9850 sec 2024-11-20T19:27:49,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.9900 sec 2024-11-20T19:27:49,313 DEBUG [Thread-2823 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:50476 2024-11-20T19:27:49,313 DEBUG [Thread-2825 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:50476 2024-11-20T19:27:49,313 DEBUG [Thread-2823 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,313 DEBUG [Thread-2825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,314 DEBUG [Thread-2821 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:50476 2024-11-20T19:27:49,314 DEBUG [Thread-2821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,315 DEBUG [Thread-2819 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:50476 2024-11-20T19:27:49,315 DEBUG [Thread-2819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,316 DEBUG [Thread-2827 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:50476 2024-11-20T19:27:49,316 DEBUG [Thread-2827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-20T19:27:49,377 INFO [Thread-2818 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-20T19:27:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(8581): Flush requested on d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:49,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-11-20T19:27:49,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:49,439 DEBUG [Thread-2812 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:50476 2024-11-20T19:27:49,439 DEBUG [Thread-2812 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:49,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-20T19:27:49,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:49,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:49,443 DEBUG [Thread-2810 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:50476 2024-11-20T19:27:49,443 DEBUG [Thread-2810 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c734f423b39441cb866c8a99b0795c74_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:49,445 DEBUG [Thread-2816 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:50476 2024-11-20T19:27:49,445 DEBUG [Thread-2808 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:50476 2024-11-20T19:27:49,445 DEBUG [Thread-2816 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,445 DEBUG [Thread-2808 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:49,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:49,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130929445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:49,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742576_1752 (size=12454) 2024-11-20T19:27:49,848 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:49,850 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c734f423b39441cb866c8a99b0795c74_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c734f423b39441cb866c8a99b0795c74_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:49,851 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/a18bfd0e6dab4859b0cbfc08f9ef1ded, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:49,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/a18bfd0e6dab4859b0cbfc08f9ef1ded is 175, key is test_row_0/A:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:49,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742577_1753 (size=31255) 2024-11-20T19:27:50,255 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=399, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/a18bfd0e6dab4859b0cbfc08f9ef1ded 2024-11-20T19:27:50,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/9890c96173b6419792ccac3919e9ef20 is 50, key is 
test_row_0/B:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:50,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742578_1754 (size=12301) 2024-11-20T19:27:50,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-20T19:27:50,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35979 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53576 deadline: 1732130930450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:50,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/9890c96173b6419792ccac3919e9ef20 2024-11-20T19:27:50,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/622c1378aa4d4057af226ca74a129310 is 50, key is test_row_0/C:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:50,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742579_1755 (size=12301) 2024-11-20T19:27:51,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/622c1378aa4d4057af226ca74a129310 2024-11-20T19:27:51,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/a18bfd0e6dab4859b0cbfc08f9ef1ded as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded 2024-11-20T19:27:51,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded, entries=150, sequenceid=399, filesize=30.5 K 2024-11-20T19:27:51,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/9890c96173b6419792ccac3919e9ef20 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9890c96173b6419792ccac3919e9ef20 2024-11-20T19:27:51,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9890c96173b6419792ccac3919e9ef20, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T19:27:51,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/622c1378aa4d4057af226ca74a129310 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/622c1378aa4d4057af226ca74a129310 2024-11-20T19:27:51,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/622c1378aa4d4057af226ca74a129310, entries=150, sequenceid=399, filesize=12.0 K 2024-11-20T19:27:51,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for d8a34887157cb9c8687afaaeab650abb in 1655ms, sequenceid=399, compaction requested=true 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:A, priority=-2147483648, current under compaction store size is 1 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:B, priority=-2147483648, current under compaction store size is 2 2024-11-20T19:27:51,093 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 
16 blocking 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:51,093 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d8a34887157cb9c8687afaaeab650abb:C, priority=-2147483648, current under compaction store size is 3 2024-11-20T19:27:51,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:51,093 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:51,093 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94617 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:51,093 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/B is initiating minor compaction (all files) 2024-11-20T19:27:51,093 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/A is initiating minor compaction (all files) 2024-11-20T19:27:51,094 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/B in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:51,094 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/A in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
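Note on the compaction selection above: each completed flush leaves one new HFile per store, and SortedCompactionPolicy/ExploringCompactionPolicy queue a minor compaction once a store holds at least hbase.hstore.compaction.min eligible files (three here); "16 blocking" is hbase.hstore.blockingStoreFiles, the per-store file count at which further flushes would be held back. A small sketch of the server-side keys involved follows; the values shown are the usual defaults, not read from this cluster, and a real deployment would set them in hbase-site.xml rather than programmatically.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3);  // minimum eligible files before a minor compaction (a.k.a. hbase.hstore.compaction.min)
    conf.setInt("hbase.hstore.compaction.max", 10);      // upper bound on files merged in one compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // matches the "16 blocking" figure in the selection entries above
    System.out.println("blockingStoreFiles = " + conf.get("hbase.hstore.blockingStoreFiles"));
  }
}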
2024-11-20T19:27:51,094 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/71de39b8af6744e2ac62b562c85f0683, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/8ea84bcb3da34a2082dc085c42035d28, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9890c96173b6419792ccac3919e9ef20] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.9 K 2024-11-20T19:27:51,094 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e2e8a689fc164b7ba26e281851df8abd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=92.4 K 2024-11-20T19:27:51,094 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
files: [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e2e8a689fc164b7ba26e281851df8abd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded] 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 71de39b8af6744e2ac62b562c85f0683, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732130866554 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2e8a689fc164b7ba26e281851df8abd, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732130866554 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ea84bcb3da34a2082dc085c42035d28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732130867172 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting f07e7a50c4cd4994a28574bbd2ffe86f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732130867172 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9890c96173b6419792ccac3919e9ef20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130868320 2024-11-20T19:27:51,094 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] compactions.Compactor(224): Compacting a18bfd0e6dab4859b0cbfc08f9ef1ded, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130868320 2024-11-20T19:27:51,104 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#B#compaction#636 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:51,104 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/ac67f6b09e5141d1981813f87a57a9a3 is 50, key is test_row_0/B:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:51,106 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:51,127 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241120c3ed816b28b4420a9b6a1f00966535f4_d8a34887157cb9c8687afaaeab650abb store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:51,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742580_1756 (size=13255) 2024-11-20T19:27:51,134 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/ac67f6b09e5141d1981813f87a57a9a3 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ac67f6b09e5141d1981813f87a57a9a3 2024-11-20T19:27:51,138 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/B of d8a34887157cb9c8687afaaeab650abb into ac67f6b09e5141d1981813f87a57a9a3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
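Note on the throughput figures above: the "total limit is 50.00 MB/second" lines come from the pressure-aware compaction throughput controller, which throttles compaction I/O between a lower and a higher bound depending on store-file pressure. A minimal sketch of the relevant keys follows; the bound values are illustrative (they happen to match the common defaults and the 50 MB/s limit seen in the log) and are not read from this cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s floor
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s ceiling
    System.out.println("lower bound = " + conf.get("hbase.hstore.compaction.throughput.lower.bound"));
  }
}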
2024-11-20T19:27:51,138 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:51,138 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/B, priority=13, startTime=1732130871093; duration=0sec 2024-11-20T19:27:51,138 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-20T19:27:51,138 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:B 2024-11-20T19:27:51,138 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-20T19:27:51,139 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-20T19:27:51,139 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1540): d8a34887157cb9c8687afaaeab650abb/C is initiating minor compaction (all files) 2024-11-20T19:27:51,139 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d8a34887157cb9c8687afaaeab650abb/C in TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:51,139 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3381aa4e1dc8446d8dfd8c4d83918269, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9265f2c75bf34e5f9e1f6492a0466b95, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/622c1378aa4d4057af226ca74a129310] into tmpdir=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp, totalSize=36.9 K 2024-11-20T19:27:51,139 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 3381aa4e1dc8446d8dfd8c4d83918269, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732130866554 2024-11-20T19:27:51,140 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 9265f2c75bf34e5f9e1f6492a0466b95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732130867172 2024-11-20T19:27:51,140 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] compactions.Compactor(224): Compacting 622c1378aa4d4057af226ca74a129310, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732130868320 2024-11-20T19:27:51,152 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort 
size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241120c3ed816b28b4420a9b6a1f00966535f4_d8a34887157cb9c8687afaaeab650abb, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:51,152 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c3ed816b28b4420a9b6a1f00966535f4_d8a34887157cb9c8687afaaeab650abb because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:51,157 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#C#compaction#638 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:51,157 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/f5fabf3068db47968f5ffbf2ec4b6a40 is 50, key is test_row_0/C:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:51,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742581_1757 (size=4469) 2024-11-20T19:27:51,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742582_1758 (size=13255) 2024-11-20T19:27:51,204 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/f5fabf3068db47968f5ffbf2ec4b6a40 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/f5fabf3068db47968f5ffbf2ec4b6a40 2024-11-20T19:27:51,211 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/C of d8a34887157cb9c8687afaaeab650abb into f5fabf3068db47968f5ffbf2ec4b6a40(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:51,211 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:51,211 INFO [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/C, priority=13, startTime=1732130871093; duration=0sec 2024-11-20T19:27:51,211 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:51,211 DEBUG [RS:0;db9c3a6c6492:35979-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:C 2024-11-20T19:27:51,573 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d8a34887157cb9c8687afaaeab650abb#A#compaction#637 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-20T19:27:51,574 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/89a215f425714da7a105c105a79c91f6 is 175, key is test_row_0/A:col10/1732130869437/Put/seqid=0 2024-11-20T19:27:51,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742583_1759 (size=32209) 2024-11-20T19:27:51,980 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/89a215f425714da7a105c105a79c91f6 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/89a215f425714da7a105c105a79c91f6 2024-11-20T19:27:51,983 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d8a34887157cb9c8687afaaeab650abb/A of d8a34887157cb9c8687afaaeab650abb into 89a215f425714da7a105c105a79c91f6(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-20T19:27:51,983 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:51,983 INFO [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb., storeName=d8a34887157cb9c8687afaaeab650abb/A, priority=13, startTime=1732130871093; duration=0sec 2024-11-20T19:27:51,983 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-20T19:27:51,983 DEBUG [RS:0;db9c3a6c6492:35979-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d8a34887157cb9c8687afaaeab650abb:A 2024-11-20T19:27:52,459 DEBUG [Thread-2814 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:50476 2024-11-20T19:27:52,459 DEBUG [Thread-2814 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5764 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5569 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5650 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5725 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5617 2024-11-20T19:27:52,459 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-20T19:27:52,459 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:27:52,459 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:50476 2024-11-20T19:27:52,460 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:52,460 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-20T19:27:52,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-20T19:27:52,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T19:27:52,462 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130872462"}]},"ts":"1732130872462"} 2024-11-20T19:27:52,463 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-20T19:27:52,516 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-20T19:27:52,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-20T19:27:52,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, UNASSIGN}] 2024-11-20T19:27:52,518 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, UNASSIGN 2024-11-20T19:27:52,519 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=CLOSING, regionLocation=db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:52,519 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-20T19:27:52,520 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276}] 2024-11-20T19:27:52,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T19:27:52,671 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:52,671 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:52,671 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-20T19:27:52,671 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing d8a34887157cb9c8687afaaeab650abb, disabling compactions & flushes 2024-11-20T19:27:52,671 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:52,671 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:52,671 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
after waiting 0 ms 2024-11-20T19:27:52,671 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 2024-11-20T19:27:52,671 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing d8a34887157cb9c8687afaaeab650abb 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-20T19:27:52,672 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=A 2024-11-20T19:27:52,672 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:52,672 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=B 2024-11-20T19:27:52,672 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:52,672 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d8a34887157cb9c8687afaaeab650abb, store=C 2024-11-20T19:27:52,672 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-20T19:27:52,677 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c17da88a25ca46bd9f48ccb6d9ad29d7_d8a34887157cb9c8687afaaeab650abb is 50, key is test_row_0/A:col10/1732130869443/Put/seqid=0 2024-11-20T19:27:52,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742584_1760 (size=12454) 2024-11-20T19:27:52,680 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-20T19:27:52,683 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241120c17da88a25ca46bd9f48ccb6d9ad29d7_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c17da88a25ca46bd9f48ccb6d9ad29d7_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:52,683 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f96372874f844b7b85b950655d9f4d4c, store: [table=TestAcidGuarantees family=A region=d8a34887157cb9c8687afaaeab650abb] 2024-11-20T19:27:52,684 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f96372874f844b7b85b950655d9f4d4c is 175, key is test_row_0/A:col10/1732130869443/Put/seqid=0 2024-11-20T19:27:52,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742585_1761 (size=31255) 2024-11-20T19:27:52,687 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=409, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f96372874f844b7b85b950655d9f4d4c 2024-11-20T19:27:52,692 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/bbcaa82f8eff4941ba04ff8bab3c46f2 is 50, key is test_row_0/B:col10/1732130869443/Put/seqid=0 2024-11-20T19:27:52,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742586_1762 (size=12301) 2024-11-20T19:27:52,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T19:27:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T19:27:53,096 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/bbcaa82f8eff4941ba04ff8bab3c46f2 2024-11-20T19:27:53,101 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/446e51eb8ce1433191535effedfe0976 is 50, key is test_row_0/C:col10/1732130869443/Put/seqid=0 2024-11-20T19:27:53,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742587_1763 (size=12301) 2024-11-20T19:27:53,505 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=409 (bloomFilter=true), 
to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/446e51eb8ce1433191535effedfe0976 2024-11-20T19:27:53,534 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/A/f96372874f844b7b85b950655d9f4d4c as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f96372874f844b7b85b950655d9f4d4c 2024-11-20T19:27:53,537 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f96372874f844b7b85b950655d9f4d4c, entries=150, sequenceid=409, filesize=30.5 K 2024-11-20T19:27:53,537 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/B/bbcaa82f8eff4941ba04ff8bab3c46f2 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bbcaa82f8eff4941ba04ff8bab3c46f2 2024-11-20T19:27:53,540 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bbcaa82f8eff4941ba04ff8bab3c46f2, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T19:27:53,540 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/.tmp/C/446e51eb8ce1433191535effedfe0976 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/446e51eb8ce1433191535effedfe0976 2024-11-20T19:27:53,542 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/446e51eb8ce1433191535effedfe0976, entries=150, sequenceid=409, filesize=12.0 K 2024-11-20T19:27:53,543 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for d8a34887157cb9c8687afaaeab650abb in 872ms, sequenceid=409, compaction requested=false 2024-11-20T19:27:53,543 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9b89cfcca911448c803bc8cbda4d81fe, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/2057403d29724c7d9bf58ba8ee82f815, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b394a62bfc3c4cbfb3389f6405706d4f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cbb01a60e839454a90f741ead74c984b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/05458811f80c468f84be29119559d88e, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/817114e16e904903a3c9159225473c8d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e2e8a689fc164b7ba26e281851df8abd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded] to archive 2024-11-20T19:27:53,544 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-20T19:27:53,545 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b3efa72140de4798b7102997f9df3048 2024-11-20T19:27:53,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d1271008cd04458fbe97dc7b27eca200 2024-11-20T19:27:53,546 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9b89cfcca911448c803bc8cbda4d81fe to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9b89cfcca911448c803bc8cbda4d81fe 2024-11-20T19:27:53,547 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/c5e6d505add84abfbb87f6eca6c5ad5e 2024-11-20T19:27:53,548 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/123f71e217594439a164b80553efc488 2024-11-20T19:27:53,549 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/2057403d29724c7d9bf58ba8ee82f815 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/2057403d29724c7d9bf58ba8ee82f815 2024-11-20T19:27:53,550 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9006ba5ed2db42938f6f11a0cda7fd86 2024-11-20T19:27:53,551 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f240454ce4624855a0df47e41cfc5c1f 2024-11-20T19:27:53,552 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b394a62bfc3c4cbfb3389f6405706d4f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/b394a62bfc3c4cbfb3389f6405706d4f 2024-11-20T19:27:53,553 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9c6eaac2e9264daf8071c80c8d379ce1 2024-11-20T19:27:53,555 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/ca411eefc1154d348575047304d35eec 2024-11-20T19:27:53,558 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cbb01a60e839454a90f741ead74c984b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cbb01a60e839454a90f741ead74c984b 2024-11-20T19:27:53,559 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/9ad1702623c4462ca791c54d917c40cc 2024-11-20T19:27:53,564 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/34ef6e9fde9d4f6cbac6ca01ba9774d3 2024-11-20T19:27:53,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T19:27:53,565 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cac3d81ddf9945b49e2e657318105646 2024-11-20T19:27:53,566 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/cf7d2e5a56ce45ed83c6e3f9e0322e3b 2024-11-20T19:27:53,568 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/98579d4cf8bb427db89a71eeff920377 2024-11-20T19:27:53,570 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e020761fb1c34adf93a2ff10230ca408 2024-11-20T19:27:53,571 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/05458811f80c468f84be29119559d88e to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/05458811f80c468f84be29119559d88e 2024-11-20T19:27:53,572 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d0eba6bf7db4457d9ad7327e54bc6aba 2024-11-20T19:27:53,573 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/dd42fbb007854bb6856a6de2eac4214d 2024-11-20T19:27:53,574 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/d75d6367b28c434f89859a42efc5e6a0 2024-11-20T19:27:53,584 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/817114e16e904903a3c9159225473c8d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/817114e16e904903a3c9159225473c8d 2024-11-20T19:27:53,586 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/420d352dd6cc412cba7577c4a22433d6 2024-11-20T19:27:53,587 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/116c8c1c92114318a3efc15030dfc047 2024-11-20T19:27:53,588 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e2e8a689fc164b7ba26e281851df8abd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/e2e8a689fc164b7ba26e281851df8abd 2024-11-20T19:27:53,589 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f07e7a50c4cd4994a28574bbd2ffe86f 2024-11-20T19:27:53,590 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/a18bfd0e6dab4859b0cbfc08f9ef1ded 2024-11-20T19:27:53,592 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/16da57388c8743beac29b9f718fe7c15, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/adc5c829bb5f425caf1a18ffe5a3198f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/de77d8b0ae244c8b9828c537b659c0e4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/001cf4d2dbd844f1beec6c7f85e88ccb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/41d5ae68574f42eaa3c3deb61ded489d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/93951d762abe4243a2c68484d41ca689, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/417f6d29c8ef4b49a74860b9c3407fd1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7c779ff7f0974a18b630560723794d13, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d01ab141f1764befaa0ce2320d523e7f, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/af7d6eaa9d184d2f8f79e2756dd52230, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ec8d164912f041cd964137ce86abf987, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/acf77aed70f0425eb393302e3a9c99fd, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9cb87ca38e6c463c88296b555c5ad4a1, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/577e70482d884e1792987ee08e513b37, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/efe72fde2cd24ce2baf2610d19f27d3d, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/0956740f07324b2491276cf4a27b4457, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/52710942317f4357812bf237601c43c5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c6cf6300525548d59168322bf1419399, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/4ea308f62788475b81f330ef6f4ca7a8, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bc93124e60fe4390a978af6d0742903a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d953174414674824981acc57b601fbfc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/20f4ca4086ee45f3b7abb4facb1b9cdf, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c945fc435a284248ba9e695bd2545321, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/71de39b8af6744e2ac62b562c85f0683, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7ee3f725e38b4c3998b02bcfa7089b1e, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/8ea84bcb3da34a2082dc085c42035d28, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9890c96173b6419792ccac3919e9ef20] to archive 2024-11-20T19:27:53,592 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:27:53,595 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9 2024-11-20T19:27:53,596 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/16da57388c8743beac29b9f718fe7c15 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/16da57388c8743beac29b9f718fe7c15 2024-11-20T19:27:53,597 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/adc5c829bb5f425caf1a18ffe5a3198f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/adc5c829bb5f425caf1a18ffe5a3198f 2024-11-20T19:27:53,598 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/de77d8b0ae244c8b9828c537b659c0e4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/de77d8b0ae244c8b9828c537b659c0e4 2024-11-20T19:27:53,599 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/001cf4d2dbd844f1beec6c7f85e88ccb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/001cf4d2dbd844f1beec6c7f85e88ccb 2024-11-20T19:27:53,600 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/41d5ae68574f42eaa3c3deb61ded489d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/41d5ae68574f42eaa3c3deb61ded489d 2024-11-20T19:27:53,602 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/93951d762abe4243a2c68484d41ca689 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/93951d762abe4243a2c68484d41ca689 2024-11-20T19:27:53,603 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/417f6d29c8ef4b49a74860b9c3407fd1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/417f6d29c8ef4b49a74860b9c3407fd1 2024-11-20T19:27:53,604 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7c779ff7f0974a18b630560723794d13 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7c779ff7f0974a18b630560723794d13 2024-11-20T19:27:53,606 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d01ab141f1764befaa0ce2320d523e7f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d01ab141f1764befaa0ce2320d523e7f 2024-11-20T19:27:53,607 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/af7d6eaa9d184d2f8f79e2756dd52230 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/af7d6eaa9d184d2f8f79e2756dd52230 2024-11-20T19:27:53,609 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ec8d164912f041cd964137ce86abf987 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ec8d164912f041cd964137ce86abf987 2024-11-20T19:27:53,611 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/acf77aed70f0425eb393302e3a9c99fd to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/acf77aed70f0425eb393302e3a9c99fd 2024-11-20T19:27:53,613 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9cb87ca38e6c463c88296b555c5ad4a1 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9cb87ca38e6c463c88296b555c5ad4a1 2024-11-20T19:27:53,614 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/577e70482d884e1792987ee08e513b37 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/577e70482d884e1792987ee08e513b37 2024-11-20T19:27:53,615 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/efe72fde2cd24ce2baf2610d19f27d3d to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/efe72fde2cd24ce2baf2610d19f27d3d 2024-11-20T19:27:53,616 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/0956740f07324b2491276cf4a27b4457 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/0956740f07324b2491276cf4a27b4457 2024-11-20T19:27:53,617 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/52710942317f4357812bf237601c43c5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/52710942317f4357812bf237601c43c5 2024-11-20T19:27:53,618 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c6cf6300525548d59168322bf1419399 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c6cf6300525548d59168322bf1419399 2024-11-20T19:27:53,619 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/4ea308f62788475b81f330ef6f4ca7a8 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/4ea308f62788475b81f330ef6f4ca7a8 2024-11-20T19:27:53,620 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bc93124e60fe4390a978af6d0742903a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bc93124e60fe4390a978af6d0742903a 2024-11-20T19:27:53,621 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d953174414674824981acc57b601fbfc to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/d953174414674824981acc57b601fbfc 2024-11-20T19:27:53,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/20f4ca4086ee45f3b7abb4facb1b9cdf to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/20f4ca4086ee45f3b7abb4facb1b9cdf 2024-11-20T19:27:53,622 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c945fc435a284248ba9e695bd2545321 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/c945fc435a284248ba9e695bd2545321 2024-11-20T19:27:53,623 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/71de39b8af6744e2ac62b562c85f0683 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/71de39b8af6744e2ac62b562c85f0683 2024-11-20T19:27:53,624 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7ee3f725e38b4c3998b02bcfa7089b1e to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/7ee3f725e38b4c3998b02bcfa7089b1e 2024-11-20T19:27:53,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/8ea84bcb3da34a2082dc085c42035d28 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/8ea84bcb3da34a2082dc085c42035d28 2024-11-20T19:27:53,625 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9890c96173b6419792ccac3919e9ef20 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/9890c96173b6419792ccac3919e9ef20 2024-11-20T19:27:53,626 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/87932f2594544b0192a616170e8717ff, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/54856a08a8ba40edaf837c47263392f7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/a0af1d1c2ae342f0a77f3c186598813b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/2ec63a4a80754ee2af9374dd5200fa7a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9154e73852bd40e6a3208a0de8f79949, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/6e0a79a19ce84f63aa535b2ecbf101d5, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/abc40d64db1e41f79c302c0182932b94, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/4840e2b5321646a3aa957cb0d2c7a6b0, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/5259f8ff136542b693fd2b3a2a7e001f, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9840103e218349f48327b7232e0014ba, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/e7deae69d57940d5b5fa51243a4a50d7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/d77dfc76bfca44049052239b8b37baac, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9722129a01964b4893db669edc4813ee, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/68a139a3e7074bef85521fd421cb42c4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3fe9b634c7c042ba95879dd80888bdfb, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/136ca4d59b62440f94ce7840e7737f26, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/eec5c84746ee460d9f72e08375ea17fc, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/03d3a06bd77744388a4dce656f21060b, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/35dcf9f634e44e66b31bd72028b470a4, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/b3353bedb5a24e9fa94bd6b349c52ef7, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0b8d6c4cefa346c684a20aa054f77a8a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/7247f3d7dc084a0da05d8058e82ebd9a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/37e6bbabfae04ea2869fab0dbd4e699a, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9be1ba3991084cddb4a48ef149fde792, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3381aa4e1dc8446d8dfd8c4d83918269, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0654915018f541c28c2c4e05e61151fa, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9265f2c75bf34e5f9e1f6492a0466b95, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/622c1378aa4d4057af226ca74a129310] to archive 2024-11-20T19:27:53,627 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-20T19:27:53,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/87932f2594544b0192a616170e8717ff to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/87932f2594544b0192a616170e8717ff 2024-11-20T19:27:53,629 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/54856a08a8ba40edaf837c47263392f7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/54856a08a8ba40edaf837c47263392f7 2024-11-20T19:27:53,630 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/a0af1d1c2ae342f0a77f3c186598813b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/a0af1d1c2ae342f0a77f3c186598813b 2024-11-20T19:27:53,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/2ec63a4a80754ee2af9374dd5200fa7a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/2ec63a4a80754ee2af9374dd5200fa7a 2024-11-20T19:27:53,631 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9154e73852bd40e6a3208a0de8f79949 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9154e73852bd40e6a3208a0de8f79949 2024-11-20T19:27:53,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/6e0a79a19ce84f63aa535b2ecbf101d5 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/6e0a79a19ce84f63aa535b2ecbf101d5 2024-11-20T19:27:53,633 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/abc40d64db1e41f79c302c0182932b94 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/abc40d64db1e41f79c302c0182932b94 2024-11-20T19:27:53,633 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/4840e2b5321646a3aa957cb0d2c7a6b0 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/4840e2b5321646a3aa957cb0d2c7a6b0 2024-11-20T19:27:53,634 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/5259f8ff136542b693fd2b3a2a7e001f to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/5259f8ff136542b693fd2b3a2a7e001f 2024-11-20T19:27:53,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9840103e218349f48327b7232e0014ba to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9840103e218349f48327b7232e0014ba 2024-11-20T19:27:53,635 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/e7deae69d57940d5b5fa51243a4a50d7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/e7deae69d57940d5b5fa51243a4a50d7 2024-11-20T19:27:53,636 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/d77dfc76bfca44049052239b8b37baac to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/d77dfc76bfca44049052239b8b37baac 2024-11-20T19:27:53,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9722129a01964b4893db669edc4813ee to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9722129a01964b4893db669edc4813ee 2024-11-20T19:27:53,638 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/68a139a3e7074bef85521fd421cb42c4 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/68a139a3e7074bef85521fd421cb42c4 2024-11-20T19:27:53,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3fe9b634c7c042ba95879dd80888bdfb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3fe9b634c7c042ba95879dd80888bdfb 2024-11-20T19:27:53,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/136ca4d59b62440f94ce7840e7737f26 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/136ca4d59b62440f94ce7840e7737f26 2024-11-20T19:27:53,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/eec5c84746ee460d9f72e08375ea17fc to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/eec5c84746ee460d9f72e08375ea17fc 2024-11-20T19:27:53,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/03d3a06bd77744388a4dce656f21060b to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/03d3a06bd77744388a4dce656f21060b 2024-11-20T19:27:53,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/35dcf9f634e44e66b31bd72028b470a4 to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/35dcf9f634e44e66b31bd72028b470a4 2024-11-20T19:27:53,643 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/b3353bedb5a24e9fa94bd6b349c52ef7 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/b3353bedb5a24e9fa94bd6b349c52ef7 2024-11-20T19:27:53,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0b8d6c4cefa346c684a20aa054f77a8a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0b8d6c4cefa346c684a20aa054f77a8a 2024-11-20T19:27:53,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/7247f3d7dc084a0da05d8058e82ebd9a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/7247f3d7dc084a0da05d8058e82ebd9a 2024-11-20T19:27:53,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/37e6bbabfae04ea2869fab0dbd4e699a to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/37e6bbabfae04ea2869fab0dbd4e699a 2024-11-20T19:27:53,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9be1ba3991084cddb4a48ef149fde792 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9be1ba3991084cddb4a48ef149fde792 2024-11-20T19:27:53,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3381aa4e1dc8446d8dfd8c4d83918269 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/3381aa4e1dc8446d8dfd8c4d83918269 2024-11-20T19:27:53,647 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0654915018f541c28c2c4e05e61151fa to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/0654915018f541c28c2c4e05e61151fa 2024-11-20T19:27:53,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9265f2c75bf34e5f9e1f6492a0466b95 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/9265f2c75bf34e5f9e1f6492a0466b95 2024-11-20T19:27:53,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/622c1378aa4d4057af226ca74a129310 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/622c1378aa4d4057af226ca74a129310 2024-11-20T19:27:53,652 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/recovered.edits/412.seqid, newMaxSeqId=412, maxSeqId=4 2024-11-20T19:27:53,652 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb. 
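The StoreCloser entries above each move a compacted store file from the region's data directory to the mirrored path under archive/. The snippet below is only a minimal, hypothetical sketch of that data-to-archive path mapping using the plain Hadoop FileSystem API; it is not the HFileArchiver implementation, and the specific region/file names are copied from the log purely for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {
    // Hypothetical sketch: mirror one store file from data/ to archive/data/,
    // mimicking the HFileArchiver "Archived from ... to ..." lines above.
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Base path, region and file name taken from the log lines above (illustrative only).
        Path root = new Path("hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d");
        String rel = "data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/346285c5e5cd4c9ebe9e4493de5f19d9";

        Path src = new Path(root, rel);
        Path dst = new Path(root, "archive/" + rel);

        FileSystem fs = src.getFileSystem(conf);
        fs.mkdirs(dst.getParent());          // make sure the archive family directory exists
        boolean moved = fs.rename(src, dst); // move (not copy) the HFile into the archive tree
        System.out.println("archived=" + moved);
    }
}
```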
2024-11-20T19:27:53,653 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for d8a34887157cb9c8687afaaeab650abb: 2024-11-20T19:27:53,653 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:53,654 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=d8a34887157cb9c8687afaaeab650abb, regionState=CLOSED 2024-11-20T19:27:53,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-20T19:27:53,656 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure d8a34887157cb9c8687afaaeab650abb, server=db9c3a6c6492,35979,1732130703276 in 1.1350 sec 2024-11-20T19:27:53,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-11-20T19:27:53,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d8a34887157cb9c8687afaaeab650abb, UNASSIGN in 1.1380 sec 2024-11-20T19:27:53,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-20T19:27:53,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1400 sec 2024-11-20T19:27:53,659 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732130873659"}]},"ts":"1732130873659"} 2024-11-20T19:27:53,660 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-20T19:27:53,672 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-20T19:27:53,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2130 sec 2024-11-20T19:27:54,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-20T19:27:54,565 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-20T19:27:54,565 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-20T19:27:54,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:54,567 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:54,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T19:27:54,567 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:54,569 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,571 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C, FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/recovered.edits] 2024-11-20T19:27:54,572 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/89a215f425714da7a105c105a79c91f6 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/89a215f425714da7a105c105a79c91f6 2024-11-20T19:27:54,573 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f96372874f844b7b85b950655d9f4d4c to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/A/f96372874f844b7b85b950655d9f4d4c 2024-11-20T19:27:54,575 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ac67f6b09e5141d1981813f87a57a9a3 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/ac67f6b09e5141d1981813f87a57a9a3 2024-11-20T19:27:54,576 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bbcaa82f8eff4941ba04ff8bab3c46f2 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/B/bbcaa82f8eff4941ba04ff8bab3c46f2 2024-11-20T19:27:54,578 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/446e51eb8ce1433191535effedfe0976 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/446e51eb8ce1433191535effedfe0976 
2024-11-20T19:27:54,579 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/f5fabf3068db47968f5ffbf2ec4b6a40 to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/C/f5fabf3068db47968f5ffbf2ec4b6a40 2024-11-20T19:27:54,581 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/recovered.edits/412.seqid to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb/recovered.edits/412.seqid 2024-11-20T19:27:54,582 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/default/TestAcidGuarantees/d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,582 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-20T19:27:54,582 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:27:54,583 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-20T19:27:54,585 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112000e09ec36e3f48689c46d9754f195d41_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112000e09ec36e3f48689c46d9754f195d41_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,586 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200be508f4cffd40cdb88e6dce24b10ec2_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411200be508f4cffd40cdb88e6dce24b10ec2_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,588 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202461fce0f1e045719c7bd8719dca335e_d8a34887157cb9c8687afaaeab650abb to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411202461fce0f1e045719c7bd8719dca335e_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,589 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044ae90fa152e446db3a66d04bc8f8601_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112044ae90fa152e446db3a66d04bc8f8601_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,590 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205f7ee08aac7047e0a0502b64b3968eaf_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411205f7ee08aac7047e0a0502b64b3968eaf_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,591 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207d583607d99143b2a69117d81e5b61e5_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411207d583607d99143b2a69117d81e5b61e5_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,592 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112086b1c496e7c14b62b7a982ffdf4efad8_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112086b1c496e7c14b62b7a982ffdf4efad8_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,592 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120917246d1bf7b47eba8102d20b46274d6_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120917246d1bf7b47eba8102d20b46274d6_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,594 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a053a58893a14dcf86538d476247ce3e_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120a053a58893a14dcf86538d476247ce3e_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,595 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b8218e14c4bc4a6ea944e7e19cc9aa32_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120b8218e14c4bc4a6ea944e7e19cc9aa32_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,595 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bb4a810de1034b7bafe1c375f767c198_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120bb4a810de1034b7bafe1c375f767c198_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,596 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c024178969064940a8751c6d4c14b935_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c024178969064940a8751c6d4c14b935_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,597 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c17da88a25ca46bd9f48ccb6d9ad29d7_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c17da88a25ca46bd9f48ccb6d9ad29d7_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,598 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c734f423b39441cb866c8a99b0795c74_d8a34887157cb9c8687afaaeab650abb to 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120c734f423b39441cb866c8a99b0795c74_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,599 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cca2e0951bbb4679a99814b315096b6e_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cca2e0951bbb4679a99814b315096b6e_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,600 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd01d367b3b147469fd902713575db90_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120cd01d367b3b147469fd902713575db90_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,601 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d30266a988ef4e6ea47cefbc71e7b8ae_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d30266a988ef4e6ea47cefbc71e7b8ae_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,601 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d72e8d7b0996480dae3cf11dc27a27e9_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120d72e8d7b0996480dae3cf11dc27a27e9_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,602 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dac5643565a4433a90b8426f591f3ad4_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dac5643565a4433a90b8426f591f3ad4_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,603 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dd7bf7d9e33f4a01a8cdcff07b2dc927_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120dd7bf7d9e33f4a01a8cdcff07b2dc927_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,604 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f561771c8b0643288e7ede392e91dd2b_d8a34887157cb9c8687afaaeab650abb to hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241120f561771c8b0643288e7ede392e91dd2b_d8a34887157cb9c8687afaaeab650abb 2024-11-20T19:27:54,604 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-20T19:27:54,605 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:54,607 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-20T19:27:54,609 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-20T19:27:54,610 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:54,610 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-20T19:27:54,610 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732130874610"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:54,611 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-20T19:27:54,611 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d8a34887157cb9c8687afaaeab650abb, NAME => 'TestAcidGuarantees,,1732130847402.d8a34887157cb9c8687afaaeab650abb.', STARTKEY => '', ENDKEY => ''}] 2024-11-20T19:27:54,611 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
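The DisableTableProcedure and DeleteTableProcedure activity above is driven by a client-side disable-then-delete request (the log shows "Operation: DISABLE ... procId: 179 completed" followed by "Client=jenkins//172.17.0.2 delete TestAcidGuarantees"). A minimal sketch of issuing that pair of calls through the public HBase client Admin API, assuming a reachable cluster configuration; only the table name is taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // A table must be disabled before it can be deleted; each call blocks until the
            // corresponding master procedure (DisableTableProcedure / DeleteTableProcedure) finishes.
            if (admin.tableExists(table) && admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            admin.deleteTable(table);
        }
    }
}
```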
2024-11-20T19:27:54,611 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732130874611"}]},"ts":"9223372036854775807"} 2024-11-20T19:27:54,612 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-20T19:27:54,650 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-20T19:27:54,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 84 msec 2024-11-20T19:27:54,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36861 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-20T19:27:54,668 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-20T19:27:54,678 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241 (was 241), OpenFileDescriptor=460 (was 462), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=667 (was 651) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3172 (was 3209) 2024-11-20T19:27:54,678 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-20T19:27:54,678 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-20T19:27:54,678 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:50476 2024-11-20T19:27:54,678 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:54,678 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-20T19:27:54,678 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1416742036, stopped=false 2024-11-20T19:27:54,679 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=db9c3a6c6492,36861,1732130702494 2024-11-20T19:27:54,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T19:27:54,689 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-20T19:27:54,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:27:54,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-20T19:27:54,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:27:54,690 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35979-0x1015afea3c50001, 
quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T19:27:54,690 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-20T19:27:54,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:54,691 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db9c3a6c6492,35979,1732130703276' ***** 2024-11-20T19:27:54,691 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-20T19:27:54,692 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(3579): Received CLOSE for d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1224): stopping server db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:54,692 DEBUG [RS:0;db9c3a6c6492:35979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-20T19:27:54,692 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing d21e0da06747a4b3da8e29803090bc10, disabling compactions & flushes 2024-11-20T19:27:54,693 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:27:54,693 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 
after waiting 0 ms 2024-11-20T19:27:54,693 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1603): Online Regions={d21e0da06747a4b3da8e29803090bc10=hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10., 1588230740=hbase:meta,,1.1588230740} 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:27:54,693 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing d21e0da06747a4b3da8e29803090bc10 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-20T19:27:54,693 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-20T19:27:54,693 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-20T19:27:54,693 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-20T19:27:54,693 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:27:54,708 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/.tmp/info/9018123d0213436a8a8fada089fb3f65 is 45, key is default/info:d/1732130709032/Put/seqid=0 2024-11-20T19:27:54,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742588_1764 (size=5037) 2024-11-20T19:27:54,720 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/info/288ee640c665467ca3202782e2f8a20f is 143, key is hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10./info:regioninfo/1732130708881/Put/seqid=0 2024-11-20T19:27:54,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742589_1765 (size=7725) 2024-11-20T19:27:54,751 INFO [regionserver/db9c3a6c6492:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T19:27:54,893 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:27:55,094 DEBUG [RS:0;db9c3a6c6492:35979 {}] 
regionserver.HRegionServer(1629): Waiting on 1588230740, d21e0da06747a4b3da8e29803090bc10 2024-11-20T19:27:55,116 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/.tmp/info/9018123d0213436a8a8fada089fb3f65 2024-11-20T19:27:55,118 INFO [regionserver/db9c3a6c6492:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-20T19:27:55,118 INFO [regionserver/db9c3a6c6492:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-20T19:27:55,119 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/.tmp/info/9018123d0213436a8a8fada089fb3f65 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/info/9018123d0213436a8a8fada089fb3f65 2024-11-20T19:27:55,122 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/info/9018123d0213436a8a8fada089fb3f65, entries=2, sequenceid=6, filesize=4.9 K 2024-11-20T19:27:55,123 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for d21e0da06747a4b3da8e29803090bc10 in 430ms, sequenceid=6, compaction requested=false 2024-11-20T19:27:55,126 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/namespace/d21e0da06747a4b3da8e29803090bc10/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-20T19:27:55,126 INFO [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 2024-11-20T19:27:55,126 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for d21e0da06747a4b3da8e29803090bc10: 2024-11-20T19:27:55,127 DEBUG [RS_CLOSE_REGION-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732130707546.d21e0da06747a4b3da8e29803090bc10. 
2024-11-20T19:27:55,131 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/info/288ee640c665467ca3202782e2f8a20f 2024-11-20T19:27:55,147 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/rep_barrier/1844b37e496d4aa780859054d8fc0358 is 102, key is TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da./rep_barrier:/1732130737991/DeleteFamily/seqid=0 2024-11-20T19:27:55,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742590_1766 (size=6025) 2024-11-20T19:27:55,150 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/rep_barrier/1844b37e496d4aa780859054d8fc0358 2024-11-20T19:27:55,166 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/table/ffe66d4ed3ca4ecd83267413c2159030 is 96, key is TestAcidGuarantees,,1732130709406.6aad06303ed006b601a1faa1a93ab5da./table:/1732130737991/DeleteFamily/seqid=0 2024-11-20T19:27:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742591_1767 (size=5942) 2024-11-20T19:27:55,294 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T19:27:55,494 DEBUG [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-20T19:27:55,569 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/table/ffe66d4ed3ca4ecd83267413c2159030 2024-11-20T19:27:55,572 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/info/288ee640c665467ca3202782e2f8a20f as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/info/288ee640c665467ca3202782e2f8a20f 2024-11-20T19:27:55,574 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/info/288ee640c665467ca3202782e2f8a20f, entries=22, sequenceid=93, filesize=7.5 K 2024-11-20T19:27:55,575 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/rep_barrier/1844b37e496d4aa780859054d8fc0358 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/rep_barrier/1844b37e496d4aa780859054d8fc0358 2024-11-20T19:27:55,578 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/rep_barrier/1844b37e496d4aa780859054d8fc0358, entries=6, sequenceid=93, filesize=5.9 K 2024-11-20T19:27:55,578 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/.tmp/table/ffe66d4ed3ca4ecd83267413c2159030 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/table/ffe66d4ed3ca4ecd83267413c2159030 2024-11-20T19:27:55,581 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/table/ffe66d4ed3ca4ecd83267413c2159030, entries=9, sequenceid=93, filesize=5.8 K 2024-11-20T19:27:55,581 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 888ms, sequenceid=93, compaction requested=false 2024-11-20T19:27:55,585 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-20T19:27:55,585 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-20T19:27:55,585 INFO [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-20T19:27:55,585 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-20T19:27:55,586 DEBUG [RS_CLOSE_META-regionserver/db9c3a6c6492:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-20T19:27:55,694 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1250): stopping server db9c3a6c6492,35979,1732130703276; all regions closed. 
2024-11-20T19:27:55,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741834_1010 (size=26050) 2024-11-20T19:27:55,700 DEBUG [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/oldWALs 2024-11-20T19:27:55,700 INFO [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db9c3a6c6492%2C35979%2C1732130703276.meta:.meta(num 1732130707259) 2024-11-20T19:27:55,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741832_1008 (size=17933346) 2024-11-20T19:27:55,703 DEBUG [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/oldWALs 2024-11-20T19:27:55,703 INFO [RS:0;db9c3a6c6492:35979 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL db9c3a6c6492%2C35979%2C1732130703276:(num 1732130706279) 2024-11-20T19:27:55,703 DEBUG [RS:0;db9c3a6c6492:35979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:55,703 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.LeaseManager(133): Closed leases 2024-11-20T19:27:55,704 INFO [RS:0;db9c3a6c6492:35979 {}] hbase.ChoreService(370): Chore service for: regionserver/db9c3a6c6492:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-20T19:27:55,704 INFO [regionserver/db9c3a6c6492:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-20T19:27:55,704 INFO [RS:0;db9c3a6c6492:35979 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35979 2024-11-20T19:27:55,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-20T19:27:55,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/db9c3a6c6492,35979,1732130703276 2024-11-20T19:27:55,758 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [db9c3a6c6492,35979,1732130703276] 2024-11-20T19:27:55,758 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing db9c3a6c6492,35979,1732130703276; numProcessing=1 2024-11-20T19:27:55,766 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/db9c3a6c6492,35979,1732130703276 already deleted, retry=false 2024-11-20T19:27:55,766 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; db9c3a6c6492,35979,1732130703276 expired; onlineServers=0 2024-11-20T19:27:55,766 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'db9c3a6c6492,36861,1732130702494' ***** 2024-11-20T19:27:55,766 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-20T19:27:55,767 DEBUG [M:0;db9c3a6c6492:36861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b50fbf3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=db9c3a6c6492/172.17.0.2:0 2024-11-20T19:27:55,767 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegionServer(1224): stopping server db9c3a6c6492,36861,1732130702494 2024-11-20T19:27:55,767 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegionServer(1250): stopping server db9c3a6c6492,36861,1732130702494; all regions closed. 2024-11-20T19:27:55,767 DEBUG [M:0;db9c3a6c6492:36861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-20T19:27:55,767 DEBUG [M:0;db9c3a6c6492:36861 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-20T19:27:55,767 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-20T19:27:55,767 DEBUG [M:0;db9c3a6c6492:36861 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-20T19:27:55,767 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.small.0-1732130705947 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.small.0-1732130705947,5,FailOnTimeoutGroup] 2024-11-20T19:27:55,767 DEBUG [master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.large.0-1732130705947 {}] cleaner.HFileCleaner(306): Exit Thread[master/db9c3a6c6492:0:becomeActiveMaster-HFileCleaner.large.0-1732130705947,5,FailOnTimeoutGroup] 2024-11-20T19:27:55,767 INFO [M:0;db9c3a6c6492:36861 {}] hbase.ChoreService(370): Chore service for: master/db9c3a6c6492:0 had [] on shutdown 2024-11-20T19:27:55,767 DEBUG [M:0;db9c3a6c6492:36861 {}] master.HMaster(1733): Stopping service threads 2024-11-20T19:27:55,768 INFO [M:0;db9c3a6c6492:36861 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-20T19:27:55,768 ERROR [M:0;db9c3a6c6492:36861 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:34097 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:34097,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-20T19:27:55,769 INFO [M:0;db9c3a6c6492:36861 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-20T19:27:55,769 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-20T19:27:55,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-20T19:27:55,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-20T19:27:55,775 DEBUG [M:0;db9c3a6c6492:36861 {}] zookeeper.ZKUtil(347): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-20T19:27:55,775 WARN [M:0;db9c3a6c6492:36861 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-20T19:27:55,775 INFO [M:0;db9c3a6c6492:36861 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-20T19:27:55,775 INFO [M:0;db9c3a6c6492:36861 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-20T19:27:55,775 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-20T19:27:55,775 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-20T19:27:55,775 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:27:55,775 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:27:55,775 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-20T19:27:55,775 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:27:55,775 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=803.12 KB heapSize=989.93 KB 2024-11-20T19:27:55,789 DEBUG [M:0;db9c3a6c6492:36861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7a11b11b6af24fed88a16257aad18e13 is 82, key is hbase:meta,,1/info:regioninfo/1732130707381/Put/seqid=0 2024-11-20T19:27:55,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742592_1768 (size=5672) 2024-11-20T19:27:55,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T19:27:55,858 INFO [RS:0;db9c3a6c6492:35979 {}] regionserver.HRegionServer(1307): Exiting; stopping=db9c3a6c6492,35979,1732130703276; zookeeper connection closed. 
2024-11-20T19:27:55,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35979-0x1015afea3c50001, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T19:27:55,859 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@55b7dccc {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@55b7dccc 2024-11-20T19:27:55,859 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-20T19:27:56,193 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2310 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7a11b11b6af24fed88a16257aad18e13 2024-11-20T19:27:56,220 DEBUG [M:0;db9c3a6c6492:36861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7c399928b26d472d8e615eb6d5db39be is 2284, key is \x00\x00\x00\x00\x00\x00\x00\x9A/proc:d/1732130849038/Put/seqid=0 2024-11-20T19:27:56,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742593_1769 (size=45749) 2024-11-20T19:27:56,229 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=802.56 KB at sequenceid=2310 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7c399928b26d472d8e615eb6d5db39be 2024-11-20T19:27:56,232 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7c399928b26d472d8e615eb6d5db39be 2024-11-20T19:27:56,249 DEBUG [M:0;db9c3a6c6492:36861 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/585b52f75db841aaaeec5742c87c2db9 is 69, key is db9c3a6c6492,35979,1732130703276/rs:state/1732130706031/Put/seqid=0 2024-11-20T19:27:56,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073742594_1770 (size=5156) 2024-11-20T19:27:56,660 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2310 (bloomFilter=true), to=hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/585b52f75db841aaaeec5742c87c2db9 2024-11-20T19:27:56,664 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7a11b11b6af24fed88a16257aad18e13 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7a11b11b6af24fed88a16257aad18e13 2024-11-20T19:27:56,666 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7a11b11b6af24fed88a16257aad18e13, entries=8, sequenceid=2310, filesize=5.5 K 2024-11-20T19:27:56,667 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7c399928b26d472d8e615eb6d5db39be as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7c399928b26d472d8e615eb6d5db39be 2024-11-20T19:27:56,670 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7c399928b26d472d8e615eb6d5db39be 2024-11-20T19:27:56,670 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7c399928b26d472d8e615eb6d5db39be, entries=183, sequenceid=2310, filesize=44.7 K 2024-11-20T19:27:56,670 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/585b52f75db841aaaeec5742c87c2db9 as hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/585b52f75db841aaaeec5742c87c2db9 2024-11-20T19:27:56,673 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34097/user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/585b52f75db841aaaeec5742c87c2db9, entries=1, sequenceid=2310, filesize=5.0 K 2024-11-20T19:27:56,673 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(3040): Finished flush of dataSize ~803.12 KB/822393, heapSize ~989.63 KB/1013384, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 898ms, sequenceid=2310, compaction requested=false 2024-11-20T19:27:56,675 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-20T19:27:56,675 DEBUG [M:0;db9c3a6c6492:36861 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-20T19:27:56,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46289 is added to blk_1073741830_1006 (size=973798) 2024-11-20T19:27:56,676 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/aad4134d-d212-9e95-1cef-40f623577c0d/MasterData/WALs/db9c3a6c6492,36861,1732130702494/db9c3a6c6492%2C36861%2C1732130702494.1732130705308 not finished, retry = 0 2024-11-20T19:27:56,777 INFO [M:0;db9c3a6c6492:36861 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-20T19:27:56,777 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-11-20T19:27:56,777 INFO [M:0;db9c3a6c6492:36861 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36861 2024-11-20T19:27:56,816 DEBUG [M:0;db9c3a6c6492:36861 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/db9c3a6c6492,36861,1732130702494 already deleted, retry=false 2024-11-20T19:27:56,925 INFO [M:0;db9c3a6c6492:36861 {}] regionserver.HRegionServer(1307): Exiting; stopping=db9c3a6c6492,36861,1732130702494; zookeeper connection closed. 2024-11-20T19:27:56,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T19:27:56,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36861-0x1015afea3c50000, quorum=127.0.0.1:50476, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-20T19:27:56,930 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-20T19:27:56,933 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T19:27:56,933 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T19:27:56,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T19:27:56,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/hadoop.log.dir/,STOPPED} 2024-11-20T19:27:56,937 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-20T19:27:56,937 WARN [BP-241231531-172.17.0.2-1732130698820 heartbeating to localhost/127.0.0.1:34097 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-20T19:27:56,937 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-20T19:27:56,937 WARN [BP-241231531-172.17.0.2-1732130698820 heartbeating to localhost/127.0.0.1:34097 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-241231531-172.17.0.2-1732130698820 (Datanode Uuid c3d568ab-dda1-4760-b2a8-9a923557015c) service to localhost/127.0.0.1:34097 2024-11-20T19:27:56,941 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/dfs/data/data1/current/BP-241231531-172.17.0.2-1732130698820 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T19:27:56,941 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/cluster_11f679c0-5e90-8650-28a2-21724370d870/dfs/data/data2/current/BP-241231531-172.17.0.2-1732130698820 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-20T19:27:56,941 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-20T19:27:56,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-20T19:27:56,952 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-20T19:27:56,952 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-20T19:27:56,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-20T19:27:56,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/17bd2c39-8e6a-5562-3c34-692334dcd249/hadoop.log.dir/,STOPPED} 2024-11-20T19:27:56,976 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-20T19:27:57,175 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down